Code Example #1
File: PL_sim.py, Project: krufab/VisNav
    def main(self):
        veh_control.connect(local_connect())

        self.set_target_location(veh_control.get_location())

        while veh_control.is_connected():
            location = veh_control.get_location()
            attitude = veh_control.get_attitude()

            self.refresh_simulator(location, attitude)
            frame = self.get_frame()
            cv2.imshow('frame', frame)

            key = cv2.waitKey(1)
            print(key)

            if key == 1113938:
                veh_control.set_velocity(2, 0, 0)    # forward
            elif key == 1113940:
                veh_control.set_velocity(-2, 0, 0)   # backward
            elif key == 1113937:
                veh_control.set_velocity(0, -2, 0)   # left
            elif key == 1113939:
                veh_control.set_velocity(0, 2, 0)    # right
            elif key == 1048690:
                yaw = math.degrees(attitude.yaw)     # yaw left
                veh_control.set_yaw(yaw - 5)
            elif key == 1048692:
                yaw = math.degrees(attitude.yaw)     # yaw right
                veh_control.set_yaw(yaw + 5)
            elif key == 1048677:
                veh_control.set_velocity(0, 0, -2)   # down
            elif key == 1048689:
                veh_control.set_velocity(0, 0, 2)    # up
            else:
                veh_control.set_velocity(0, 0, 0)    # still
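
Note on the key handling above: the integer constants compared against the cv2.waitKey() return value are raw, backend-dependent key codes. On the GTK builds of OpenCV this project appears to have been written against, 1113937-1113940 are the Left/Up/Right/Down arrow keys and 1048690, 1048692, 1048677 and 1048689 are 'r', 't', 'e' and 'q' with a modifier flag (0x100000) set. The sketch below is a hypothetical, dictionary-based version of the same key-to-command mapping; the helper name and table are illustrative and not part of either project.

import math

# Low 16 bits of the cv2.waitKey() code -> (vx, vy, vz) velocity command.
# 65361-65364 are the GTK key values for the Left/Up/Right/Down arrows.
VELOCITY_KEYS = {
    65362: (2, 0, 0),      # Up arrow    -> forward
    65364: (-2, 0, 0),     # Down arrow  -> backward
    65361: (0, -2, 0),     # Left arrow  -> left
    65363: (0, 2, 0),      # Right arrow -> right
    ord('e'): (0, 0, -2),  # 'e' -> down
    ord('q'): (0, 0, 2),   # 'q' -> up
}
YAW_KEYS = {ord('r'): -5, ord('t'): +5}  # yaw left / yaw right, in degrees

def command_for_key(raw_key, yaw_rad):
    """Map a raw cv2.waitKey() code to ('yaw', degrees) or ('velocity', (vx, vy, vz))."""
    key = raw_key & 0xFFFF  # strip modifier flags such as 0x100000
    if key in YAW_KEYS:
        return 'yaw', math.degrees(yaw_rad) + YAW_KEYS[key]
    return 'velocity', VELOCITY_KEYS.get(key, (0, 0, 0))  # unknown key: hold still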
Code Example #2
File: PL_sim.py, Project: Aerobota/Precland
    def main(self):
        veh_control.connect(local_connect())

        self.set_target_location(veh_control.get_location())

        while (veh_control.is_connected()):
            location = veh_control.get_location()
            attitude = veh_control.get_attitude()

            self.refresh_simulator(location, attitude)
            frame = self.get_frame()
            cv2.imshow('frame', frame)

            key = cv2.waitKey(1)
            print(key)

            if key == 1113938:
                veh_control.set_velocity(2, 0, 0)  #forward
            elif key == 1113940:
                veh_control.set_velocity(-2, 0, 0)  #backward
            elif key == 1113937:
                veh_control.set_velocity(0, -2, 0)  #left
            elif key == 1113939:
                veh_control.set_velocity(0, 2, 0)  #right
            elif (key == 1048690):
                yaw = math.degrees(attitude.yaw)  #yaw left
                veh_control.set_yaw(yaw - 5)
            elif (key == 1048692):
                yaw = math.degrees(attitude.yaw)  #yaw right
                veh_control.set_yaw(yaw + 5)
            elif (key == 1048677):
                veh_control.set_velocity(0, 0, -2)  #down
            elif (key == 1048689):
                veh_control.set_velocity(0, 0, 2)  #up
            else:
                veh_control.set_velocity(0, 0, 0)  #still
Code Example #3
File: PrecisionLand.py, Project: senzai/Precland
    def run(self):

        VN_logger.text(VN_logger.GENERAL, "Running {0}".format(self.name()))

        # start a video capture
        if self.simulator:
            VN_logger.text(VN_logger.GENERAL, "Using simulator")
            sim.load_target(self.target_file, self.target_size)
            sim.set_target_location(veh_control.get_home(True))
            # sim.set_target_location(Location(0,0,0))

        else:
            VN_video.start_capture(self.camera_index)

        # create an image processor
        # detector = CircleDetector()
        detector = TopCode()

        # create a queue for images
        imageQueue = []

        # initialize autopilot variables
        location = Location(0, 0, 0)
        attitude = Attitude(0, 0, 0)

        while veh_control.is_connected():

            """
            #kill camera for testing
            if(cv2.waitKey(2) == 1113938):
                self.kill_camera =  not self.kill_camera
            """

            # we are in the landing zone or in a landing mode and we are still running the landing program
            # just because the program is running does not mean it controls the vehicle
            # i.e. in the landing area but not in a landing mode
            # FIXME add inside_landing_area() back to conditional
            if (self.always_run) or (veh_control.get_mode() == "LAND") or (veh_control.get_mode() == "RTL"):

                # update how often we dispatch a command
                VN_dispatcher.calculate_dispatch_schedule()

                # update simulator
                if self.simulator:
                    # get info from autopilot
                    location = veh_control.get_location()
                    attitude = veh_control.get_attitude()
                    sim.refresh_simulator(location, attitude)

                # grab an image
                frame = None
                capStart = current_milli_time()
                if self.simulator:
                    frame = sim.get_frame()
                else:
                    frame = VN_video.get_image()
                capStop = current_milli_time()

                """
                if(self.kill_camera):
                    frame[:] = (0,255,0)
                """

                # update capture time
                VN_dispatcher.update_capture_time(capStop - capStart)

                # Process image
                # We schedule the process as opposed to waiting for an available core
                # This brings consistency and prevents overwriting a dead process before
                # information has been grabbed from the Pipe
                if VN_dispatcher.is_ready():
                    # queue the image for later use: displaying image, overlays, recording
                    imageQueue.append((frame, self.frames_captured))

                    # the function must be run directly from the class
                    VN_dispatcher.dispatch(target=detector.analyze_frame_async, args=(frame, self.frames_captured))

                    self.frames_captured += 1

                # retrieve results
                if VN_dispatcher.is_available():

                    # results of image processor
                    results = VN_dispatcher.retreive()

                    # get image that was passed with the image processor
                    for f in imageQueue:
                        img, frame_id = f
                        if results[0] == frame_id:
                            imageQueue.remove(f)
                            break
                    VN_logger.text(VN_logger.GENERAL, "Frame {0}".format(frame_id))

                    # overlay gui
                    # rend_Image = gui.add_target_highlights(img, results[3])
                    rend_Image = gui.add_ring_highlights(img, results[4])

                    # show/record images
                    VN_logger.image(VN_logger.RAW, img)
                    VN_logger.image(VN_logger.GUI, rend_Image)

                    # display/log data
                    VN_logger.text(
                        VN_logger.ALGORITHM,
                        "RunTime: {0} Center: {1} Distance: {2} Raw Target: {3}".format(
                            results[1], results[2], results[3], results[4]
                        ),
                    )
                    VN_logger.text(VN_logger.DEBUG, "Rings detected: {0}".format(len(results[4])))

                    # send results if we found the landing pad
                    if results[2] is not None:
                        # shift origin to center of the image
                        x_pixel = results[2][0] - (self.camera_width / 2.0)
                        y_pixel = results[2][1] - (
                            self.camera_height / 2.0
                        )  # y-axis is inverted??? Works with arducopter

                        # convert target location to angular radians
                        x_angle = x_pixel * (self.camera_hfov / self.camera_width) * (math.pi / 180.0)
                        y_angle = y_pixel * (self.camera_vfov / self.camera_height) * (math.pi / 180.0)

                        # send commands to autopilot
                        veh_control.report_landing_target(x_angle, y_angle, results[3], 0, 0)

            else:
                VN_logger.text(VN_logger.GENERAL, "Not in landing mode")

        # terminate program
        VN_logger.text(VN_logger.GENERAL, "Vehicle disconnected, Program Terminated")
        if not self.simulator:
            VN_video.stop_capture()
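
The final block of run() converts the detected pad centre from pixel coordinates to angular offsets: the origin is shifted to the middle of the frame, then scaled by the field of view per pixel and by pi/180, which implies camera_hfov and camera_vfov are stored in degrees. Below is a standalone sketch of that conversion with a numeric check; the function name and camera parameters used here are illustrative, not values from the project.

import math

def pixel_to_angle(pixel, fov_deg, resolution):
    """Angular offset in radians of a pixel coordinate, measured from the image centre,
    assuming the field of view is spread evenly across the frame."""
    offset = pixel - resolution / 2.0          # shift origin to the image centre
    return offset * (fov_deg / resolution) * (math.pi / 180.0)

# Target detected at pixel (480, 240) in a 640x480 frame with a 60 x 45 degree FOV:
x_angle = pixel_to_angle(480, 60.0, 640)   # ~0.262 rad (~15 degrees right of centre)
y_angle = pixel_to_angle(240, 45.0, 480)   # 0.0 rad (vertically centred)
print(x_angle, y_angle)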
Code Example #4
File: PrecisionLand.py, Project: hainh22/Precland
    def run(self):

        VN_logger.text(VN_logger.GENERAL, 'Running {0}'.format(self.name()))

        #start a video capture
        if (self.simulator):
            VN_logger.text(VN_logger.GENERAL, 'Using simulator')
            sim.load_target(self.target_file, self.target_size)
            sim.set_target_location(veh_control.get_home(True))
            #sim.set_target_location(Location(0,0,0))

        else:
            VN_video.start_capture(self.camera_index)

        #create an image processor
        #detector = CircleDetector()
        detector = TopCode()

        #create a queue for images
        imageQueue = []

        #initialize autopilot variables
        location = Location(0, 0, 0)
        attitude = Attitude(0, 0, 0)

        while veh_control.is_connected():
            '''
            #kill camera for testing
            if(cv2.waitKey(2) == 1113938):
                self.kill_camera =  not self.kill_camera
            '''

            #we are in the landing zone or in a landing mode and we are still running the landing program
            #just because the program is running does not mean it controls the vehicle
            #i.e. in the landing area but not in a landing mode
            #FIXME add inside_landing_area() back to conditional
            if (self.always_run or veh_control.get_mode() == "LAND"
                    or veh_control.get_mode() == "RTL"):

                #update how often we dispatch a command
                VN_dispatcher.calculate_dispatch_schedule()

                #update simulator
                if (self.simulator):
                    #get info from autopilot
                    location = veh_control.get_location()
                    attitude = veh_control.get_attitude()
                    sim.refresh_simulator(location, attitude)

                # grab an image
                frame = None
                capStart = current_milli_time()
                if (self.simulator):
                    frame = sim.get_frame()
                else:
                    frame = VN_video.get_image()
                capStop = current_milli_time()
                '''
                if(self.kill_camera):
                    frame[:] = (0,255,0)
                '''

                #update capture time
                VN_dispatcher.update_capture_time(capStop - capStart)

                #Process image
                #We schedule the process as opposed to waiting for an available core
                #This brings consistency and prevents overwriting a dead process before
                #information has been grabbed from the Pipe
                if VN_dispatcher.is_ready():
                    #queue the image for later use: displaying image, overlays, recording
                    imageQueue.append((frame, self.frames_captured))

                    #the function must be run directly from the class
                    VN_dispatcher.dispatch(target=detector.analyze_frame_async,
                                           args=(frame, self.frames_captured))

                    self.frames_captured += 1

                #retrieve results
                if VN_dispatcher.is_available():

                    #results of image processor
                    results = VN_dispatcher.retreive()

                    # get image that was passed with the image processor
                    for f in imageQueue:
                        img, frame_id = f
                        if results[0] == frame_id:
                            imageQueue.remove(f)
                            break
                    VN_logger.text(VN_logger.GENERAL,
                                   'Frame {0}'.format(frame_id))

                    #overlay gui
                    #rend_Image = gui.add_target_highlights(img, results[3])
                    rend_Image = gui.add_ring_highlights(img, results[4])

                    #show/record images
                    VN_logger.image(VN_logger.RAW, img)
                    VN_logger.image(VN_logger.GUI, rend_Image)

                    #display/log data
                    VN_logger.text(
                        VN_logger.ALGORITHM,
                        'RunTime: {0} Center: {1} Distance: {2} Raw Target: {3}'
                        .format(results[1], results[2], results[3],
                                results[4]))
                    VN_logger.text(
                        VN_logger.DEBUG,
                        'Rings detected: {0}'.format(len(results[4])))

                    #send results if we found the landing pad
                    if (results[2] is not None):
                        #shift origin to center of the image
                        x_pixel = results[2][0] - (self.camera_width / 2.0)
                        y_pixel = results[2][1] - (
                            self.camera_height / 2.0
                        )  #y-axis is inverted??? Works with arducopter

                        #convert target location to angular radians
                        x_angle = x_pixel * (self.camera_hfov / self.camera_width) * (math.pi / 180.0)
                        y_angle = y_pixel * (self.camera_vfov / self.camera_height) * (math.pi / 180.0)

                        #send commands to autopilot
                        veh_control.report_landing_target(
                            x_angle, y_angle, results[3], 0, 0)

            else:
                VN_logger.text(VN_logger.GENERAL, 'Not in landing mode')

        #terminate program
        VN_logger.text(VN_logger.GENERAL,
                       'Vehicle disconnected, Program Terminated')
        if not self.simulator:
            VN_video.stop_capture()
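
Both versions of run() pair each asynchronous detector result with the frame it was computed from: frames go into imageQueue as (frame, frame_id) tuples when dispatched, and the retrieval loop matches on the frame id returned in results[0]. A minimal sketch of that bookkeeping pattern in isolation; the names are illustrative and not the Precland API.

image_queue = []        # (frame, frame_id) tuples awaiting their processing results
frames_captured = 0

def enqueue(frame):
    """Remember a frame, tagged with a running id, before dispatching it for processing."""
    global frames_captured
    image_queue.append((frame, frames_captured))
    frames_captured += 1

def pop_matching_frame(result_frame_id):
    """Return and remove the queued frame whose id matches an asynchronous result."""
    for entry in image_queue:
        frame, frame_id = entry
        if frame_id == result_frame_id:
            image_queue.remove(entry)
            return frame
    return None  # result arrived for a frame that is no longer queued

# enqueue('frame-A'); enqueue('frame-B'); pop_matching_frame(0) returns 'frame-A'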