Example #1
    def run(self):

        VN_logger.text(VN_logger.GENERAL, "Running {0}".format(self.name()))

        # start a video capture
        if self.simulator:
            VN_logger.text(VN_logger.GENERAL, "Using simulator")
            sim.load_target(self.target_file, self.target_size)
            sim.set_target_location(veh_control.get_home(True))
            # sim.set_target_location(Location(0,0,0))

        else:
            VN_video.start_capture(self.camera_index)

        # create an image processor
        # detector = CircleDetector()
        detector = TopCode()

        # create a queue for images
        imageQueue = []

        # initialize autopilot variables
        location = Location(0, 0, 0)
        attitude = Attitude(0, 0, 0)

        while veh_control.is_connected():

            """
            #kill camera for testing
            if(cv2.waitKey(2) == 1113938):
                self.kill_camera =  not self.kill_camera
            """

            # we are in the landing zone or in a landing mode, and the landing program is still running
            # just because the program is running does not mean it controls the vehicle,
            # i.e. we can be in the landing area but not in a landing mode
            # FIXME add inside_landing_area() back to conditional
            if (self.always_run) or (veh_control.get_mode() == "LAND") or (veh_control.get_mode() == "RTL"):

                # update how often we dispatch a command
                VN_dispatcher.calculate_dispatch_schedule()

                # update simulator
                if self.simulator:
                    # get info from autopilot
                    location = veh_control.get_location()
                    attitude = veh_control.get_attitude()
                    sim.refresh_simulator(location, attitude)

                # grab an image
                frame = None
                capStart = current_milli_time()
                if self.simulator:
                    frame = sim.get_frame()
                else:
                    frame = VN_video.get_image()
                capStop = current_milli_time()

                """
                if(self.kill_camera):
                    frame[:] = (0,255,0)
                """

                # update capture time
                VN_dispatcher.update_capture_time(capStop - capStart)

                # Process image
                # We schedule the process as opposed to waiting for an available core
                # This brings consistency and prevents overwriting a dead process before
                # information has been grabbed from the Pipe
                if VN_dispatcher.is_ready():
                    # queue the image for later use: displaying image, overlays, recording
                    imageQueue.append((frame, self.frames_captured))

                    # the function must be run directly from the class
                    VN_dispatcher.dispatch(target=detector.analyze_frame_async, args=(frame, self.frames_captured))

                    self.frames_captured += 1

                # retrieve results
                if VN_dispatcher.is_available():

                    # results of image processor
                    results = VN_dispatcher.retreive()

                    # get image that was passed with the image processor
                    for f in imageQueue:
                        img, frame_id = f
                        if results[0] == frame_id:
                            imageQueue.remove(f)
                            break
                    VN_logger.text(VN_logger.GENERAL, "Frame {0}".format(frame_id))

                    # overlay gui
                    # rend_Image = gui.add_target_highlights(img, results[3])
                    rend_Image = gui.add_ring_highlights(img, results[4])

                    # show/record images
                    VN_logger.image(VN_logger.RAW, img)
                    VN_logger.image(VN_logger.GUI, rend_Image)

                    # display/log data
                    VN_logger.text(
                        VN_logger.ALGORITHM,
                        "RunTime: {0} Center: {1} Distance: {2} Raw Target: {3}".format(
                            results[1], results[2], results[3], results[4]
                        ),
                    )
                    VN_logger.text(VN_logger.DEBUG, "Rings detected: {0}".format(len(results[4])))

                    # send results if we found the landing pad
                    if results[2] is not None:
                        # shift origin to center of the image
                        x_pixel = results[2][0] - (self.camera_width / 2.0)
                        y_pixel = results[2][1] - (
                            self.camera_height / 2.0
                        )  # y-axis is inverted??? Works with arducopter

                        # convert target location to angular radians (see the sketch after this example)
                        x_angle = x_pixel * (self.camera_hfov / self.camera_width) * (math.pi / 180.0)
                        y_angle = y_pixel * (self.camera_vfov / self.camera_height) * (math.pi / 180.0)

                        # send commands to autopilot
                        veh_control.report_landing_target(x_angle, y_angle, results[3], 0, 0)

            else:
                VN_logger.text(VN_logger.GENERAL, "Not in landing mode")

        # terminate program
        VN_logger.text(VN_logger.GENERAL, "Vehicle disconnected, Program Terminated")
        if not self.simulator:
            VN_video.stop_capture()
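
The angle conversion near the end of this example turns a pixel offset from the image center into an angular offset: degrees-per-pixel (field of view divided by resolution) scaled by pi/180 gives radians per pixel. A minimal standalone sketch of that arithmetic is below; the resolution and field-of-view values are made-up placeholders, not the values this class actually reads from its camera configuration.

import math

# Hypothetical camera parameters, for illustration only
CAMERA_WIDTH, CAMERA_HEIGHT = 640, 480   # pixels
CAMERA_HFOV, CAMERA_VFOV = 60.0, 45.0    # degrees


def pixel_to_angle(cx, cy):
    # shift the origin to the center of the image
    x_pixel = cx - CAMERA_WIDTH / 2.0
    y_pixel = cy - CAMERA_HEIGHT / 2.0
    # degrees-per-pixel scaled by pi/180 gives radians per pixel
    x_angle = x_pixel * (CAMERA_HFOV / CAMERA_WIDTH) * (math.pi / 180.0)
    y_angle = y_pixel * (CAMERA_VFOV / CAMERA_HEIGHT) * (math.pi / 180.0)
    return x_angle, y_angle


# a target 100 px right of center maps to about 0.164 rad (~9.4 degrees)
print(pixel_to_angle(420, 240))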
Example #2

					readings += 1

		#cannot decode target
		if(readings == 0):
			return -1
		#average all distance readings
		return distance/(readings * 1.0)


if __name__ == "__main__":

	#cam = cv2.VideoCapture(0)
	cam = flow_cam
	detector = CircleDetector()

	if cam is not None:
		while True:
			ret, img = cam.read()
			if(img is not None and ret == True):
				results = detector.analyze_frame(img)
				rend_Image = gui.add_target_highlights(img, results[3])
				#show results
				cv2.imshow('gui', rend_Image)
				cv2.waitKey(1)
				print ('RunTime: {0} Center: {1} Distance: {2} Raw Target: {3}'.format(results[0],results[1],results[2],results[3]))

			else:
				print "failed to grab image"
Example #3
                    readings += 1

        #cannot decode target
        if (readings == 0):
            return -1
        #average all distance readings
        return distance / (readings * 1.0)


if __name__ == "__main__":

    #cam = cv2.VideoCapture(0)
    cam = flow_cam
    detector = CircleDetector()

    if cam is not None:
        while True:
            ret, img = cam.read()
            if (img is not None and ret == True):
                results = detector.analyze_frame(img)
                rend_Image = gui.add_target_highlights(img, results[3])
                #show results
                cv2.imshow('gui', rend_Image)
                cv2.waitKey(1)
                print('RunTime: {0} Center: {1} Distance: {2} Raw Target: {3}'.
                      format(results[0], results[1], results[2], results[3]))

            else:
                print "failed to grab image"
Example #4
#video source
cam = ImageReader(args.input)

file_count = len([f for f in os.walk(args.input).next()[2] if f[-4:] == ".bmp"])
key = np.empty((file_count),object)
print "total frames", file_count


frame_id = 0
if cam is not None and cam.isOpened():
    while True:
        ret, img = cam.peek(frame_id)
        if(img is not None and ret == True):

            img = gui.add_stats(img, str(frame_id) + '/' + str(file_count),10,10)
            cv2.imshow('Image', img)
            k = cv2.waitKey(0)

            if k == 65362: #upkey
                key[frame_id] = 1.0
                frame_id += 1
                print frame_id, "Present"
            elif k == 65364: #down key
                key[frame_id] = 0.0
                frame_id += 1
                print frame_id, "Not present"
            elif k == 65363: #right key
                key[frame_id] = 0.5
                frame_id += 1
                print frame_id, "Partial"
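
The frame count at the top of this example relies on the Python 2-only os.walk(args.input).next(); the same count written for Python 3 is sketched below. The directory path is a placeholder, and str.endswith replaces the f[-4:] slice comparison.

import os

input_dir = "frames"  # placeholder path, for illustration only

# next(os.walk(...)) yields (dirpath, dirnames, filenames) for the top level
file_count = len([f for f in next(os.walk(input_dir))[2] if f.endswith(".bmp")])
print("total frames", file_count)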
Example #5
            img *= (255.0 / (range_v))
            img /= res_reduction
            img *= res_reduction
            return img
        else:
            return np.zeros((img.shape[0], img.shape[1]), np.uint8)


if __name__ == "__main__":

    #cam = cv2.VideoCapture(0)
    cam = flow_cam
    detector = TopCode()

    if cam is not None:
        while True:
            ret, img = cam.read()
            if (img is not None and ret == True):
                results = detector.analyze_frame(img)
                print results[1]

                rend_Image = gui.add_ring_highlights(img, results[3])
                #show results
                cv2.imshow('gui', rend_Image)
                cv2.waitKey(1)
                print('RunTime: {0} Center: {1} Distance: {2} Raw Target: {3}'.
                      format(results[0], results[1], results[2], results[3]))

            else:
                print "failed to grab image"
Example #6
    def run(self):

        VN_logger.text(VN_logger.GENERAL, 'Running {0}'.format(self.name()))

        #start a video capture
        if (self.simulator):
            VN_logger.text(VN_logger.GENERAL, 'Using simulator')
            sim.load_target(self.target_file, self.target_size)
            sim.set_target_location(veh_control.get_home(True))
            #sim.set_target_location(Location(0,0,0))

        else:
            VN_video.start_capture(self.camera_index)

        #create an image processor
        #detector = CircleDetector()
        detector = TopCode()

        #create a queue for images
        imageQueue = []

        #initialize autopilot variables
        location = Location(0, 0, 0)
        attitude = Attitude(0, 0, 0)

        while veh_control.is_connected():
            '''
            #kill camera for testing
            if(cv2.waitKey(2) == 1113938):
                self.kill_camera =  not self.kill_camera
            '''

            #we are in the landing zone or in a landing mode, and the landing program is still running
            #just because the program is running does not mean it controls the vehicle,
            #i.e. we can be in the landing area but not in a landing mode
            #FIXME add inside_landing_area() back to conditional
            if (self.always_run) or (veh_control.get_mode()
                                     == "LAND") or (veh_control.get_mode()
                                                    == "RTL"):

                #update how often we dispatch a command
                VN_dispatcher.calculate_dispatch_schedule()

                #update simulator
                if (self.simulator):
                    #get info from autopilot
                    location = veh_control.get_location()
                    attitude = veh_control.get_attitude()
                    sim.refresh_simulator(location, attitude)

                # grab an image
                frame = None
                capStart = current_milli_time()
                if (self.simulator):
                    frame = sim.get_frame()
                else:
                    frame = VN_video.get_image()
                capStop = current_milli_time()
                '''
                if(self.kill_camera):
                    frame[:] = (0,255,0)
                '''

                #update capture time
                VN_dispatcher.update_capture_time(capStop - capStart)

                #Process image
                #We schedule the process as opposed to waiting for an available core
                #This brings consistency and prevents overwriting a dead process before
                #information has been grabbed from the Pipe (see the sketch after this example)
                if VN_dispatcher.is_ready():
                    #queue the image for later use: displaying image, overlays, recording
                    imageQueue.append((frame, self.frames_captured))

                    #the function must be run directly from the class
                    VN_dispatcher.dispatch(target=detector.analyze_frame_async,
                                           args=(frame, self.frames_captured))

                    self.frames_captured += 1

                #retrieve results
                if VN_dispatcher.is_available():

                    #results of image processor
                    results = VN_dispatcher.retreive()

                    # get image that was passed with the image processor
                    for f in imageQueue:
                        img, frame_id = f
                        if results[0] == frame_id:
                            imageQueue.remove(f)
                            break
                    VN_logger.text(VN_logger.GENERAL,
                                   'Frame {0}'.format(frame_id))

                    #overlay gui
                    #rend_Image = gui.add_target_highlights(img, results[3])
                    rend_Image = gui.add_ring_highlights(img, results[4])

                    #show/record images
                    VN_logger.image(VN_logger.RAW, img)
                    VN_logger.image(VN_logger.GUI, rend_Image)

                    #display/log data
                    VN_logger.text(
                        VN_logger.ALGORITHM,
                        'RunTime: {0} Center: {1} Distance: {2} Raw Target: {3}'
                        .format(results[1], results[2], results[3],
                                results[4]))
                    VN_logger.text(
                        VN_logger.DEBUG,
                        'Rings detected: {0}'.format(len(results[4])))

                    #send results if we found the landing pad
                    if (results[2] is not None):
                        #shift origin to center of the image
                        x_pixel = results[2][0] - (self.camera_width / 2.0)
                        y_pixel = results[2][1] - (
                            self.camera_height / 2.0
                        )  #y-axis is inverted??? Works with arducopter

                        #convert target location to angular radians
                        x_angle = x_pixel * (self.camera_hfov /
                                             self.camera_width) * (math.pi /
                                                                   180.0)
                        y_angle = y_pixel * (self.camera_vfov /
                                             self.camera_height) * (math.pi /
                                                                    180.0)

                        #send commands to autopilot
                        veh_control.report_landing_target(
                            x_angle, y_angle, results[3], 0, 0)

            else:
                VN_logger.text(VN_logger.GENERAL, 'Not in landing mode')

        #terminate program
        VN_logger.text(VN_logger.GENERAL,
                       'Vehicle disconnected, Program Terminated')
        if not self.simulator:
            VN_video.stop_capture()
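
The comments in the processing loop above explain the design: frames are dispatched to worker processes on a schedule, and each result is read back through a Pipe before the dead process is overwritten. A minimal sketch of that dispatch-then-retrieve pattern with the standard multiprocessing module follows; it is only an illustration of the idea, not the actual VN_dispatcher API, and analyze_frame_async here is a stand-in for the real detector.

import multiprocessing


def analyze_frame_async(conn, frame, frame_id):
    # stand-in detector: send (frame_id, result) back over the pipe
    conn.send((frame_id, sum(frame)))
    conn.close()


def dispatch(frame, frame_id):
    # the parent keeps the receive end; the worker gets the send end
    parent_conn, child_conn = multiprocessing.Pipe(duplex=False)
    proc = multiprocessing.Process(target=analyze_frame_async,
                                   args=(child_conn, frame, frame_id))
    proc.start()
    return proc, parent_conn


if __name__ == "__main__":
    proc, conn = dispatch([1, 2, 3], 0)
    # read the result from the pipe before discarding the process handle
    frame_id, result = conn.recv()
    proc.join()
    print(frame_id, result)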
Example #7
			img -= min_v
			img *= (255.0/(range_v))
			img /= res_reduction
			img *= res_reduction
			return img
		else:
			return np.zeros((img.shape[0],img.shape[1]), np.uint8)


if __name__ == "__main__":

	#cam = cv2.VideoCapture(0)
	cam = flow_cam
	detector = TopCode()

	if cam is not None:
		while True:
			ret, img = cam.read()
			if(img is not None and ret == True):
				results = detector.analyze_frame(img)
				print results[1]

				rend_Image = gui.add_ring_highlights(img, results[3])
				#show results
				cv2.imshow('gui', rend_Image)
				cv2.waitKey(1)
				print ('RunTime: {0} Center: {1} Distance: {2} Raw Target: {3}'.format(results[0],results[1],results[2],results[3]))

			else:
				print "failed to grab image"
Example #8
#video source
cam = ImageReader(args.input)

file_count = len(
    [f for f in os.walk(args.input).next()[2] if f[-4:] == ".bmp"])
key = np.empty((file_count), object)
print "total frames", file_count

frame_id = 0
if cam is not None and cam.isOpened():
    while True:
        ret, img = cam.peek(frame_id)
        if (img is not None and ret == True):

            img = gui.add_stats(img,
                                str(frame_id) + '/' + str(file_count), 10, 10)
            cv2.imshow('Image', img)
            k = cv2.waitKey(0)

            if k == 65362:  #upkey
                key[frame_id] = 1.0
                frame_id += 1
                print frame_id, "Present"
            elif k == 65364:  #down key
                key[frame_id] = 0.0
                frame_id += 1
                print frame_id, "Not present"
            elif k == 65363:  #right key
                key[frame_id] = 0.5
                frame_id += 1
                print frame_id, "Partial"