# Example 1
def main():
    """Track marker order 5 on camera 0, publishing poses to ROS or
    displaying them locally depending on the PublishToROS flag."""
    markers = [5]

    if PublishToROS:
        ros_pub = RosPublisher(markers)

    camera = CameraDriver(markers, defaultKernelSize=15, cameraNumber=0)
    # Let Ctrl-C stop the capture loop cleanly via the driver's handler.
    signal.signal(signal.SIGINT, camera.signalHandler)

    # Homography calibration: image pixel corners -> world coordinates.
    image_corners = [[325, 0], [1045, 0], [1045, 720], [325, 720]]
    world_corners = [[0, 0], [300, 0], [300, 300], [0, 300]]
    corrector = PerspectiveCorrecter(image_corners, world_corners)

    while camera.running:
        camera.getImage()
        camera.processFrame()
        detections = camera.returnPositions()

        if PublishToROS:
            ros_pub.publishMarkerLocations(corrector.convertPose(detections[0]))
            camera.publishImageFrame(ros_pub)
        else:
            camera.drawDetectedMarkers()
            camera.handleKeyboardEvents()
            camera.showProcessedFrame()

    print("Stopping")
# Example 2
def main():
    """Locate marker 5 on camera 0 and publish or display the result.

    Fix: the original body mixed tabs and spaces (a TabError in Python 3
    and ambiguous nesting in Python 2); indentation is normalized to
    4 spaces with the nesting the surrounding code implies.
    """
    toFind = [5]

    if PublishToROS:
        RP = RosPublisher(toFind)

    cd = CameraDriver(toFind, defaultKernelSize=15, cameraNumber=0)
    # Install a SIGINT handler so Ctrl-C stops the loop via cd.running.
    signal.signal(signal.SIGINT, cd.signalHandler)

    # Image-plane calibration points and their real-world counterparts.
    pointLocationsInImage = [[325, 0], [1045, 0], [1045, 720], [325, 720]]
    realCoordinates = [[0, 0], [300, 0], [300, 300], [0, 300]]
    perspectiveConverter = PerspectiveCorrecter(pointLocationsInImage,
                                                realCoordinates)

    while cd.running:
        cd.getImage()
        cd.processFrame()
        y = cd.returnPositions()

        if PublishToROS:
            RP.publishMarkerLocations(perspectiveConverter.convertPose(y[0]))
            cd.publishImageFrame(RP)
        else:
            cd.drawDetectedMarkers()
            cd.handleKeyboardEvents()
            cd.showProcessedFrame()

    print("Stopping")
# Example 3
def main():
    """Time two sample calls, then track markers 4 and 6.

    Fixes: Python-2-only ``print`` statements converted to the ``print()``
    function (identical output for a single %-formatted string on both
    versions), a dead ``pass`` removed, and the bare ``except:`` narrowed
    to ``except Exception`` so KeyboardInterrupt/SystemExit still work.
    """
    t0 = time()
    t1 = time()
    t2 = time()

    print("function vers1 takes %f" % (t1 - t0))
    print("function vers2 takes %f" % (t2 - t1))

    toFind = [4, 6]

    if PublishToROS:
        RP = RosPublisher(toFind)

    cd = CameraDriver(toFind, defaultKernelSize=25)
    t0 = time()

    # Calibration: image pixel locations and matching world coordinates.
    pointLocationsInImage = [[952, 533], [946, 3], [1668, 3], [1667, 527]]
    realCoordinates = [[0, 0], [0, 109], [150.1, 109], [150.1, 0]]

    perspectiveConverter = PerspectiveCorrecter(pointLocationsInImage, realCoordinates)

    while cd.running:
        # Rotate timestamps so (t0 - t1) is the duration of one iteration.
        (t1, t0) = (t0, time())
        cd.getImage()
        cd.processFrame()
        cd.drawDetectedMarkers()
        cd.showProcessedFrame()
        cd.handleKeyboardEvents()
        y = cd.returnPositions()
        if PublishToROS:
            RP.publishMarkerLocations(y)
        else:
            for k in range(len(y)):
                try:
                    poseCorrected = perspectiveConverter.convertPose(y[k])
                    print(
                        "%8.3f %8.3f %8.3f %8.3f %s"
                        % (
                            poseCorrected.x,
                            poseCorrected.y,
                            poseCorrected.theta,
                            poseCorrected.quality,
                            poseCorrected.order,
                        )
                    )
                except Exception:
                    # Best-effort per marker: a failed conversion is skipped.
                    pass

    print("Stopping")
# Example 4
def main():
    """Time two sample calls, then track marker 6 from a recorded video.

    Fixes: Python-2-only ``print`` statements converted to ``print()``
    calls (same output either way for a single %-formatted string), the
    dead ``pass`` removed, and the bare ``except:`` narrowed to
    ``except Exception``.
    """
    t0 = time()
    t1 = time()
    t2 = time()

    print('function vers1 takes %f' % (t1 - t0))
    print('function vers2 takes %f' % (t2 - t1))

    toFind = [6]

    if PublishToROS:
        RP = RosPublisher(toFind)

    # Process a pre-recorded flight instead of a live camera.
    cd = CameraDriver(
        toFind,
        defaultKernelSize=25,
        cameraDevice="recording_flight_with_5_marker_afternoon.mkv")
    t0 = time()

    # Calibration: image pixel locations and matching world coordinates.
    pointLocationsInImage = [[1328, 340], [874, 346], [856, 756], [1300, 762]]
    realCoordinates = [[0, 0], [300, 0], [300, 250], [0, 250]]
    perspectiveConverter = PerspectiveCorrecter(pointLocationsInImage,
                                                realCoordinates)

    while cd.running:
        # Rotate timestamps so (t0 - t1) is the duration of one iteration.
        (t1, t0) = (t0, time())
        cd.getImage()
        cd.processFrame()
        cd.drawDetectedMarkers()
        cd.showProcessedFrame()
        cd.handleKeyboardEvents()
        y = cd.returnPositions()
        if PublishToROS:
            RP.publishMarkerLocations(y)
            cd.publishImageFrame(RP)
        else:
            for k in range(len(y)):
                try:
                    poseCorrected = perspectiveConverter.convertPose(y[k])
                    print(
                        "x: %8.3f y: %8.3f angle: %8.3f quality: %8.3f order: %s"
                        %
                        (poseCorrected.x, poseCorrected.y, poseCorrected.theta,
                         poseCorrected.quality, poseCorrected.order))
                except Exception:
                    # Best-effort per marker: a failed conversion is skipped.
                    pass
    print("Stopping")
# Example 5
def main():
    """Track marker 6 from a recorded video and print corrected poses.

    Fixes: Python-2-only ``print`` statements converted to ``print()``
    calls (same output for a single %-formatted string), the dead
    ``pass`` removed, and the bare ``except:`` narrowed to
    ``except Exception``.
    """
    t0 = time()
    t1 = time()
    t2 = time()

    print('function vers1 takes %f' % (t2 - t1) if False else 'function vers1 takes %f' % (t1 - t0))
    print('function vers2 takes %f' % (t2 - t1))

    toFind = [6]

    if PublishToROS:
        RP = RosPublisher(toFind)

    # Process a pre-recorded flight instead of a live camera.
    cd = CameraDriver(toFind, defaultKernelSize=25,
                      cameraDevice="recording_flight_with_5_marker_afternoon.mkv")
    t0 = time()

    # Calibration: image pixel locations and matching world coordinates.
    pointLocationsInImage = [[1328, 340], [874, 346], [856, 756], [1300, 762]]
    realCoordinates = [[0, 0], [300, 0], [300, 250], [0, 250]]
    perspectiveConverter = PerspectiveCorrecter(pointLocationsInImage, realCoordinates)

    while cd.running:
        # Rotate timestamps so (t0 - t1) is the duration of one iteration.
        (t1, t0) = (t0, time())
        cd.getImage()
        cd.processFrame()
        cd.drawDetectedMarkers()
        cd.showProcessedFrame()
        cd.handleKeyboardEvents()
        y = cd.returnPositions()
        if PublishToROS:
            RP.publishMarkerLocations(y)
            cd.publishImageFrame(RP)
        else:
            for k in range(len(y)):
                try:
                    poseCorrected = perspectiveConverter.convertPose(y[k])
                    print("x: %8.3f y: %8.3f angle: %8.3f quality: %8.3f order: %s" % (poseCorrected.x, poseCorrected.y, poseCorrected.theta, poseCorrected.quality, poseCorrected.order))
                except Exception:
                    # Best-effort per marker: a failed conversion is skipped.
                    pass
    print("Stopping")
# Example 6
def main():
    """Run the LocatorDriver loop, publishing to ROS or printing poses.

    Fix: the single Python-2-only ``print`` statement is converted to a
    ``print()`` call for consistency with the function-call prints the
    block already uses (identical output for one %-formatted string).
    """
    if publish_to_ros:
        ros_publisher = RosPublisher(list_of_markers_to_find,
                                     markerpose_ros_topic)

    ld = LocatorDriver(list_of_markers_to_find,
                       default_kernel_size=14,
                       scaling_parameter=1000,
                       downscale_factor=2)
    t0 = time()

    # Calibration of setup in robolab, so that the
    # coordinates correspond to real world coordinates.
    reference_point_locations_in_image = [[1328, 340], [874, 346], [856, 756],
                                          [1300, 762]]
    reference_point_loc_in_world_coord = [[0, 0], [300, 0], [300, 250],
                                          [0, 250]]
    persp_corr = PerspectiveCorrecter(reference_point_locations_in_image,
                                      reference_point_loc_in_world_coord)

    while ld.running and stop_flag is False:
        # Rotate timestamps so (t0 - t1) is the duration of one iteration.
        (t1, t0) = (t0, time())
        if print_debug_messages is True:
            print("time for one iteration: %f" % (t0 - t1))
        ld.get_image()
        ld.process_frame()
        ld.draw_detected_markers()
        ld.show_processed_frame()
        ld.handle_keyboard_events()
        y = ld.return_positions()
        if publish_to_ros:
            # Image size is needed so the subscriber can scale coordinates.
            w, h = ld.get_image_size()
            ros_publisher.publish_marker_locations(y, w, h, ld)
        else:
            for k in range(len(y)):
                try:
                    pose_corr = persp_corr.convertPose(y[k])
                    print("%8.3f %8.3f %8.3f %8.3f %s" %
                          (pose_corr.x, pose_corr.y, pose_corr.theta,
                           pose_corr.quality, pose_corr.order))
                except Exception as e:
                    print("%s" % e)

    print("Stopping")
# Example 7
    def __init__(self):
        """Set up the FrobitLocator node: publishers, camera subscriber,
        camera driver and the perspective corrector.

        Fix: the original mixed tabs and spaces (a TabError in Python 3);
        indentation is normalized to 4 spaces and the stray trailing
        semicolon removed.
        """
        # Instantiate ros publisher with information about the markers that
        # will be tracked.
        rospy.init_node('FrobitLocator')
        self.toFind = rospy.get_param('~to_find', 4)
        rospy.logwarn("Looking for drone order: %d" % self.toFind)
        self.pub = None
        self.bridge = CvBridge()

        pose_out = rospy.get_param('~pose_out', 'pose')
        self.pub = rospy.Publisher(pose_out, DronePose, queue_size=10)

        self.imagePub = rospy.Publisher("imagePublisher", Image, queue_size=10)

        topic_camera_in = rospy.get_param('~camera_in', '/camera/stream')
        self.image_sub = rospy.Subscriber(topic_camera_in, Image,
                                          self.callbackFrame)

        # Latest frame received from the camera topic (set by the callback).
        self.newFrame = False
        self.frame = None

        self.cd = CameraDriver(self.toFind, defaultKernelSize=25)

        # Calibration: image pixel corners and matching world coordinates.
        pointLocationsInImage = [[952, 533], [946, 3], [1668, 3], [1667, 527]]
        realCoordinates = [[0, 0], [0, 109], [150.1, 109], [150.1, 0]]

        self.perspectiveConverter = PerspectiveCorrecter(pointLocationsInImage,
                                                         realCoordinates)
def main():
    """Track marker 5, optionally logging poses and pictures to disk.

    Fixes: tab/space indentation normalized to 4 spaces, the bare
    ``except:`` narrowed to ``except Exception``, and the unused
    ``imageCounter`` bookkeeping removed.
    """
    createLogFile = False
    createLogPictures = False

    if createLogFile:
        # One timestamped log file per run.
        fileName = strftime("%m-%d-%Y %H:%M:%S")
        textFile = open("log/%s.txt" % fileName, "w")

    toFind = [5]
    cd = CameraDriver(toFind, defaultKernelSize=21, cameraNumber=0)

    # Calibration: image pixel locations and matching world coordinates.
    pointLocationsInImage = [[1328, 340], [874, 346], [856, 756], [1300, 762]]
    realCoordinates = [[0, 0], [300, 0], [300, 250], [0, 250]]
    perspectiveConverter = PerspectiveCorrecter(pointLocationsInImage,
                                                realCoordinates)
    print('Running..')

    while cd.running:
        cd.getImage()

        timestamp = time()
        cd.processFrame()
        cd.drawDetectedMarkers()
        cd.handleKeyboardEvents()
        y = cd.returnPositions()
        for k in range(len(y)):
            try:
                poseCorrected = perspectiveConverter.convertPose(y[k])
                if createLogFile:
                    textFile.write(poseToString(poseCorrected, timestamp) + "\n")
                if createLogPictures:
                    cd.savePicture()
            except Exception:
                # Best-effort per marker: a failed conversion is skipped.
                pass

        cd.showProcessedFrame()

    if createLogFile:
        textFile.close()
    print("Stopping..")
	def __init__(self):
		rospy.init_node('MarkerLocator')

		self.rate = rospy.Rate(10) # Hz

		# needed to convert image
		self.bridge = CvBridge()

		# read topic names
		markerimage_sub_topic = rospy.get_param("~markerimage_sub",'/markerlocator/image_raw')
		markerpose_pub_topic = rospy.get_param("~markerpose_pub",'/markerlocator/markerpose')

		# read parameters
		self.show_image = rospy.get_param("~show_image", False)
		self.print_debug_messages = rospy.get_param("~print_debug_messages", False)
		down_scale_factor = rospy.get_param("/image_downscale_factor", 1.0)
		self.marker_order = rospy.get_param("~marker_order", 0)
		marker_size = rospy.get_param("~marker_size", 0) / down_scale_factor
		calib_a_wld = [rospy.get_param("/calibrate_a_world_x", 0), rospy.get_param("/calibrate_a_world_y", 0)]
		calib_a_img = [rospy.get_param("/calibrate_a_image_x", 0) / down_scale_factor,
					   rospy.get_param("/calibrate_a_image_y", 0) / down_scale_factor]
		calib_b_wld = [rospy.get_param("/calibrate_b_world_x", 0), rospy.get_param("/calibrate_b_world_y", 0)]
		calib_b_img = [rospy.get_param("/calibrate_b_image_x", 0) / down_scale_factor,
					   rospy.get_param("/calibrate_b_image_y", 0) / down_scale_factor]
		calib_c_wld = [rospy.get_param("/calibrate_c_world_x", 0), rospy.get_param("/calibrate_c_world_y", 0)]
		calib_c_img = [rospy.get_param("/calibrate_c_image_x", 0) / down_scale_factor,
					   rospy.get_param("/calibrate_c_image_y", 0) / down_scale_factor]
		calib_d_wld = [rospy.get_param("/calibrate_d_world_x", 0), rospy.get_param("/calibrate_d_world_y", 0)]
		calib_d_img = [rospy.get_param("/calibrate_d_image_x", 0) / down_scale_factor,
					   rospy.get_param("/calibrate_d_image_y", 0) / down_scale_factor]

		# static parameters
		scaling_parameter = 1000 # only for (debug) plotting purposes

		# Calibration of setup, so that the coordinates correspond to real world coordinates.
		calib_pts_image = [calib_a_img, calib_b_img, calib_c_img, calib_d_img]
		calib_pts_world = [calib_a_wld, calib_b_wld, calib_c_wld, calib_d_wld]
		self.perspective_corrector = PerspectiveCorrecter(calib_pts_image, calib_pts_world)
		if self.print_debug_messages:
			print 'Calibration points image:', calib_pts_image
			print 'Calibration points world:', calib_pts_world

		# instantiate camera driver
		self.cd = CameraDriver(self.marker_order, marker_size, scaling_parameter)
		if self.show_image:
			self.cd.open_image_window()

		# instantiate markerpose publisher
		self.markerpose_msg = markerpose()
		self.markerpose_pub = rospy.Publisher(markerpose_pub_topic, markerpose, queue_size=0)

		# instantiate image subscribers
		self.time_prev_image = time()
		rospy.Subscriber(markerimage_sub_topic, Image, self.on_new_image)

		# call updater routine
		self.updater()  
# Example 10
def main():
    """Run the CameraDriver loop, publishing to ROS or printing poses.

    Fix: the lone Python-2-only ``print`` statement is converted to a
    ``print()`` call for consistency with the function-call prints the
    block already uses (identical output for one %-formatted string).
    """
    if publish_to_ros:
        ros_publisher = RosPublisher(list_of_markers_to_find, markerpose_ros_topic)

    cd = CameraDriver(list_of_markers_to_find, default_kernel_size=55, scaling_parameter=1000, downscale_factor=2)  # Best in robolab.
    t0 = time()

    # Calibration of setup in robolab, so that the coordinates correspond to real world coordinates.
    reference_point_locations_in_image = [[1328, 340], [874, 346], [856, 756], [1300, 762]]
    reference_point_locations_in_world_coordinates = [[0, 0], [300, 0], [300, 250], [0, 250]]
    perspective_corrector = PerspectiveCorrecter(reference_point_locations_in_image,
                                                 reference_point_locations_in_world_coordinates)

    while cd.running and stop_flag is False:
        # Rotate timestamps so (t0 - t1) is the duration of one iteration.
        (t1, t0) = (t0, time())
        if print_debug_messages is True:
            print("time for one iteration: %f" % (t0 - t1))
        cd.get_image()
        cd.process_frame()
        cd.draw_detected_markers()
        cd.show_processed_frame()
        cd.handle_keyboard_events()
        y = cd.return_positions()
        if publish_to_ros:
            ros_publisher.publish_marker_locations(y)
        else:
            for k in range(len(y)):
                try:
                    pose_corrected = perspective_corrector.convertPose(y[k])
                    print("%8.3f %8.3f %8.3f %8.3f %s" % (pose_corrected.x,
                                                          pose_corrected.y,
                                                          pose_corrected.theta,
                                                          pose_corrected.quality,
                                                          pose_corrected.order))
                except Exception as e:
                    print("%s" % e)

    print("Stopping")
def main():
    """Track markers 4-7 and print timestamped, perspective-corrected poses.

    Fix: the bare ``except:`` is narrowed to ``except Exception`` so it no
    longer swallows KeyboardInterrupt/SystemExit.
    """
    t0 = time()
    t1 = time()

    # 4 is not optimal (can be confused by 5); 5 works fine.
    toFind = [4, 5, 6, 7]

    cd = CameraDriver(toFind, defaultKernelSize=21)
    t0 = time()

    # Calibration: image pixel locations and matching world coordinates.
    pointLocationsInImage = [[1328, 340], [874, 346], [856, 756], [1300, 762]]
    realCoordinates = [[0, 0], [300, 0], [300, 250], [0, 250]]
    perspectiveConverter = PerspectiveCorrecter(pointLocationsInImage,
                                                realCoordinates)

    while cd.running:
        # Rotate timestamps so (t0 - t1) is the duration of one iteration.
        (t1, t0) = (t0, time())
        cd.getImage()
        timestamp = time()
        cd.processFrame()
        cd.drawDetectedMarkers()
        cd.showProcessedFrame()
        cd.handleKeyboardEvents()
        y = cd.returnPositions()
        for k in range(len(y)):
            try:
                poseCorrected = perspectiveConverter.convertPose(y[k])
                print(poseToString(poseCorrected, timestamp))
            except Exception:
                # Best-effort per marker: a failed conversion is skipped.
                pass

    print("Stopping")
def main():
    """Track markers 4-7 and print timestamped corrected poses.

    Fixes: trailing whitespace removed and the bare ``except:`` narrowed
    to ``except Exception``.
    """
    t0 = time()
    t1 = time()

    # 4 is not optimal (can be confused by 5); 5 works fine.
    toFind = [4, 5, 6, 7]

    cd = CameraDriver(toFind, defaultKernelSize=21)
    t0 = time()

    # Calibration: image pixel locations and matching world coordinates.
    pointLocationsInImage = [[1328, 340], [874, 346], [856, 756], [1300, 762]]
    realCoordinates = [[0, 0], [300, 0], [300, 250], [0, 250]]
    perspectiveConverter = PerspectiveCorrecter(pointLocationsInImage, realCoordinates)

    while cd.running:
        # Rotate timestamps so (t0 - t1) is the duration of one iteration.
        (t1, t0) = (t0, time())
        cd.getImage()
        timestamp = time()
        cd.processFrame()
        cd.drawDetectedMarkers()
        cd.showProcessedFrame()
        cd.handleKeyboardEvents()
        y = cd.returnPositions()
        for k in range(len(y)):
            try:
                poseCorrected = perspectiveConverter.convertPose(y[k])
                print(poseToString(poseCorrected, timestamp))
            except Exception:
                # Best-effort per marker: a failed conversion is skipped.
                pass

    print("Stopping")
def main():
    """Track marker 5 on camera 0 and print raw corrected coordinates.

    Fixes: the tab-indented Python-2 ``print`` statement is converted to
    a ``print()`` call with the same arguments (output matches the old
    statement under Python 3), indentation normalized to 4 spaces, and
    the bare ``except:`` narrowed to ``except Exception``.
    """
    toFind = [5]
    cd = CameraDriver(toFind, defaultKernelSize=21, cameraNumber=0)

    # Calibration: image pixel locations and matching world coordinates.
    pointLocationsInImage = [[1328, 340], [874, 346], [856, 756], [1300, 762]]
    realCoordinates = [[0, 0], [300, 0], [300, 250], [0, 250]]
    perspectiveConverter = PerspectiveCorrecter(pointLocationsInImage,
                                                realCoordinates)

    while cd.running:
        cd.getImage()

        timestamp = time()
        cd.processFrame()
        y = cd.returnPositions()
        for k in range(len(y)):
            try:
                poseCorrected = perspectiveConverter.convertPose(y[k])
                print("x: ", poseCorrected.x, " y:", poseCorrected.y,
                      " timestamp: ", timestamp)
            except Exception:
                # Best-effort per marker: a failed conversion is skipped.
                pass
def main():
    """Track marker 5 on camera 0 and print raw corrected coordinates.

    Fixes: the Python-2 ``print`` statement is converted to a ``print()``
    call with the same arguments (output matches the old statement under
    Python 3), and the bare ``except:`` narrowed to ``except Exception``.
    """
    toFind = [5]
    cd = CameraDriver(toFind, defaultKernelSize=21, cameraNumber=0)

    # Calibration: image pixel locations and matching world coordinates.
    pointLocationsInImage = [[1328, 340], [874, 346], [856, 756], [1300, 762]]
    realCoordinates = [[0, 0], [300, 0], [300, 250], [0, 250]]
    perspectiveConverter = PerspectiveCorrecter(pointLocationsInImage,
                                                realCoordinates)

    while cd.running:
        cd.getImage()

        timestamp = time()
        cd.processFrame()
        y = cd.returnPositions()
        for k in range(len(y)):
            try:
                poseCorrected = perspectiveConverter.convertPose(y[k])
                print("x: ", poseCorrected.x, " y:", poseCorrected.y,
                      " timestamp: ", timestamp)
            except Exception:
                # Best-effort per marker: a failed conversion is skipped.
                pass
# Example 15
class ROS:
    """ROS node wrapping CameraDriver: subscribes to camera frames, runs
    marker detection, and publishes the corrected drone pose plus the
    processed image.

    Fixes: tab/space indentation normalized to 4 spaces, legacy
    ``except E, e`` syntax replaced with ``except E as e``, and ``print``
    statements converted to ``print()`` calls (same output for a single
    argument).
    """

    def __init__(self):
        # Instantiate ros publisher with information about the markers that
        # will be tracked.
        rospy.init_node('FrobitLocator')
        self.toFind = rospy.get_param('~to_find', 4)
        rospy.logwarn("Looking for drone order: %d" % self.toFind)
        self.pub = None
        self.bridge = CvBridge()

        pose_out = rospy.get_param('~pose_out', 'pose')
        self.pub = rospy.Publisher(pose_out, DronePose, queue_size=10)

        self.imagePub = rospy.Publisher("imagePublisher", Image, queue_size=10)

        topic_camera_in = rospy.get_param('~camera_in', '/camera/stream')
        self.image_sub = rospy.Subscriber(topic_camera_in, Image,
                                          self.callbackFrame)

        # Latest frame received from the camera topic (set by the callback).
        self.newFrame = False
        self.frame = None

        self.cd = CameraDriver(self.toFind, defaultKernelSize=25)

        # Calibration: image pixel corners and matching world coordinates.
        pointLocationsInImage = [[952, 533], [946, 3], [1668, 3], [1667, 527]]
        realCoordinates = [[0, 0], [0, 109], [150.1, 109], [150.1, 0]]

        self.perspectiveConverter = PerspectiveCorrecter(pointLocationsInImage,
                                                         realCoordinates)

    def callbackFrame(self, frame):
        """Convert an incoming ROS image, run detection, publish results."""
        try:
            source = self.bridge.imgmsg_to_cv2(frame, "bgr8")

            # Wrap the numpy image in a legacy IplImage header so the
            # old-style CameraDriver can consume it.
            frame = cv.CreateImageHeader((source.shape[1], source.shape[0]),
                                         cv.IPL_DEPTH_8U, 3)
            cv.SetData(frame, source.tostring(),
                       source.dtype.itemsize * 3 * source.shape[1])
        except CvBridgeError as e:
            print(e)

        self.cd.processFrame(frame)
        self.cd.drawDetectedMarkers()
        self.cd.showProcessedFrame()
        self.cd.handleKeyboardEvents()
        y = self.cd.returnPositions()
        self.publishMarkerLocations(y)
        img = numpy.asarray(self.cd.processedFrame[:, :])
        self.publishImage(img)

    def publishMarkerLocations(self, locations):
        """Project the first detected marker into world coordinates via the
        homography and publish it as a DronePose."""
        x, y = self.perspectiveConverter.convert((locations[0].x,
                                                  locations[0].y))
        self.pub.publish(DronePose(x, y, locations[0].theta,
                                   locations[0].quality,
                                   locations[0].order_match))

    def publishImage(self, Image):
        """Publish the processed frame; conversion errors are logged only."""
        try:
            self.imagePub.publish(self.bridge.cv2_to_imgmsg(Image, 'bgr8'))
        except CvBridgeError as e:
            print(e)
class marker_locator_node():
    """ROS node that locates a marker in incoming images and publishes a
    perspective-corrected pose on every frame.

    Fixes: ``== True`` comparison replaced with a plain truth test, and the
    two Python-2 ``print`` statements converted to ``print()`` calls for
    consistency with the rest of the class.
    """

    def __init__(self):
        rospy.init_node('MarkerLocator')

        self.rate = rospy.Rate(10)  # Hz

        # needed to convert image
        self.bridge = CvBridge()

        # read topic names
        markerimage_sub_topic = rospy.get_param("~markerimage_sub",
                                                '/markerlocator/image_raw')
        markerpose_pub_topic = rospy.get_param("~markerpose_pub",
                                               '/markerlocator/markerpose')

        # read parameters
        self.show_image = rospy.get_param("~show_image", False)
        self.print_debug_messages = rospy.get_param("~print_debug_messages",
                                                    False)
        down_scale_factor = rospy.get_param("/image_downscale_factor", 1.0)
        self.marker_order = rospy.get_param("~marker_order", 0)
        marker_size = rospy.get_param("~marker_size", 0) / down_scale_factor
        # Four reference points a-d: world coordinates and the matching
        # (down-scaled) image coordinates.
        calib_a_wld = [
            rospy.get_param("/calibrate_a_world_x", 0),
            rospy.get_param("/calibrate_a_world_y", 0)
        ]
        calib_a_img = [
            rospy.get_param("/calibrate_a_image_x", 0) / down_scale_factor,
            rospy.get_param("/calibrate_a_image_y", 0) / down_scale_factor
        ]
        calib_b_wld = [
            rospy.get_param("/calibrate_b_world_x", 0),
            rospy.get_param("/calibrate_b_world_y", 0)
        ]
        calib_b_img = [
            rospy.get_param("/calibrate_b_image_x", 0) / down_scale_factor,
            rospy.get_param("/calibrate_b_image_y", 0) / down_scale_factor
        ]
        calib_c_wld = [
            rospy.get_param("/calibrate_c_world_x", 0),
            rospy.get_param("/calibrate_c_world_y", 0)
        ]
        calib_c_img = [
            rospy.get_param("/calibrate_c_image_x", 0) / down_scale_factor,
            rospy.get_param("/calibrate_c_image_y", 0) / down_scale_factor
        ]
        calib_d_wld = [
            rospy.get_param("/calibrate_d_world_x", 0),
            rospy.get_param("/calibrate_d_world_y", 0)
        ]
        calib_d_img = [
            rospy.get_param("/calibrate_d_image_x", 0) / down_scale_factor,
            rospy.get_param("/calibrate_d_image_y", 0) / down_scale_factor
        ]

        # static parameters
        scaling_parameter = 1000  # only for (debug) plotting purposes

        # Calibration of setup, so that the coordinates correspond to real world coordinates.
        calib_pts_image = [calib_a_img, calib_b_img, calib_c_img, calib_d_img]
        calib_pts_world = [calib_a_wld, calib_b_wld, calib_c_wld, calib_d_wld]
        self.perspective_corrector = PerspectiveCorrecter(
            calib_pts_image, calib_pts_world)
        if self.print_debug_messages:
            print('Calibration points image:', calib_pts_image)
            print('Calibration points world:', calib_pts_world)

        # instantiate camera driver
        self.cd = CameraDriver(self.marker_order, marker_size,
                               scaling_parameter)
        if self.show_image:
            self.cd.open_image_window()

        # instantiate markerpose publisher
        self.markerpose_msg = markerpose()
        self.markerpose_pub = rospy.Publisher(markerpose_pub_topic,
                                              markerpose,
                                              queue_size=0)

        # instantiate image subscribers
        self.time_prev_image = time()
        rospy.Subscriber(markerimage_sub_topic, Image, self.on_new_image)

        # call updater routine
        self.updater()

    def on_new_image(self, msg):
        """Process one incoming image message and publish the marker pose."""
        self.cd.current_frame = self.bridge.imgmsg_to_cv2(msg, "8UC1")
        self.cd.process_frame()
        if self.show_image:
            self.cd.show_processed_frame()
            self.cd.handle_keyboard_events()

        # publish the marker position
        self.markerpose_msg.header.stamp = rospy.get_rostime()
        pose_corrected = self.perspective_corrector.convertPose(
            self.cd.location)
        self.markerpose_msg.order = pose_corrected.order
        self.markerpose_msg.x = pose_corrected.x
        self.markerpose_msg.y = pose_corrected.y
        self.markerpose_msg.theta = pose_corrected.theta
        self.markerpose_msg.quality = pose_corrected.quality
        self.markerpose_msg.timestamp = msg.header.stamp
        self.markerpose_pub.publish(self.markerpose_msg)

        # print debug info
        if self.print_debug_messages:
            print(
                "Time: %.3f  Marker: %s  x: %8.3f  y:%8.3f  theta: %8.3f  quality: %8.3f  interval: %.3f"
                % (msg.header.stamp.to_sec(), pose_corrected.order,
                   pose_corrected.x, pose_corrected.y, pose_corrected.theta,
                   pose_corrected.quality, (time() - self.time_prev_image)))
            self.time_prev_image = time()

    def updater(self):
        """Spin at the configured rate until ROS shuts down."""
        while not rospy.is_shutdown():
            self.rate.sleep()
class marker_locator_node():
    """ROS node that locates a marker in incoming images and publishes its pose.

    Subscribes to a raw image topic, runs the CameraDriver marker detector on
    each frame, maps the detected image position to world coordinates using a
    four-point perspective calibration, and publishes a markerpose message.
    """

    def __init__(self):
        rospy.init_node('MarkerLocator')

        self.rate = rospy.Rate(10)  # Hz; spin frequency of updater()

        # needed to convert ROS Image messages to OpenCV images
        self.bridge = CvBridge()

        # read topic names
        markerimage_sub_topic = rospy.get_param("~markerimage_sub",
                                                '/markerlocator/image_raw')
        markerpose_pub_topic = rospy.get_param("~markerpose_pub",
                                               '/markerlocator/markerpose')

        # read parameters
        self.show_image = rospy.get_param("~show_image", False)
        self.print_debug_messages = rospy.get_param("~print_debug_messages",
                                                    False)
        down_scale_factor = rospy.get_param("/image_downscale_factor", 1.0)
        self.marker_order = rospy.get_param("~marker_order", 0)
        # marker size is specified in full-resolution pixels; scale it to the
        # (possibly) downscaled input image
        marker_size = rospy.get_param("~marker_size", 0) / down_scale_factor

        # read the four calibration corners (a..d): world coordinates and the
        # corresponding image coordinates (image values are downscaled too).
        # One loop replaces four copy-pasted parameter-read blocks.
        calib_pts_world = []
        calib_pts_image = []
        for corner in ('a', 'b', 'c', 'd'):
            calib_pts_world.append([
                rospy.get_param("/calibrate_%s_world_x" % corner, 0),
                rospy.get_param("/calibrate_%s_world_y" % corner, 0)
            ])
            calib_pts_image.append([
                rospy.get_param("/calibrate_%s_image_x" % corner, 0) / down_scale_factor,
                rospy.get_param("/calibrate_%s_image_y" % corner, 0) / down_scale_factor
            ])

        # static parameters
        scaling_parameter = 1000  # only for (debug) plotting purposes

        # Calibration of setup, so that the coordinates correspond to real
        # world coordinates.
        self.perspective_corrector = PerspectiveCorrecter(calib_pts_image,
                                                          calib_pts_world)
        if self.print_debug_messages:
            # print() with one pre-formatted string behaves identically under
            # Python 2 and 3 (replaces the Python-2-only print statements)
            print('Calibration points image: %s' % (calib_pts_image,))
            print('Calibration points world: %s' % (calib_pts_world,))

        # instantiate camera driver
        self.cd = CameraDriver(self.marker_order, marker_size,
                               scaling_parameter)
        if self.show_image:
            self.cd.open_image_window()

        # instantiate markerpose publisher
        self.markerpose_msg = markerpose()
        # NOTE(review): queue_size=0 means an infinite queue in rospy —
        # confirm this is intended (queue_size=1 would keep only the latest)
        self.markerpose_pub = rospy.Publisher(markerpose_pub_topic,
                                              markerpose,
                                              queue_size=0)

        # instantiate image subscriber
        self.time_prev_image = time()
        rospy.Subscriber(markerimage_sub_topic, Image, self.on_new_image)

        # call updater routine (blocks until shutdown)
        self.updater()

    def on_new_image(self, msg):
        """Detect the marker in *msg* and publish its world-frame pose."""
        self.cd.current_frame = self.bridge.imgmsg_to_cv2(msg, "8UC1")
        self.cd.process_frame()
        if self.show_image:
            self.cd.show_processed_frame()
            self.cd.handle_keyboard_events()

        # publish the marker position in world coordinates
        self.markerpose_msg.header.stamp = rospy.get_rostime()
        pose_corrected = self.perspective_corrector.convertPose(self.cd.location)
        self.markerpose_msg.order = pose_corrected.order
        self.markerpose_msg.x = pose_corrected.x
        self.markerpose_msg.y = pose_corrected.y
        self.markerpose_msg.theta = pose_corrected.theta
        self.markerpose_msg.quality = pose_corrected.quality
        # keep the original capture timestamp of the source image as well
        self.markerpose_msg.timestamp = msg.header.stamp
        self.markerpose_pub.publish(self.markerpose_msg)

        # print debug info (idiom: truthiness instead of "== True")
        if self.print_debug_messages:
            print("Time: %.3f  Marker: %s  x: %8.3f  y:%8.3f  theta: %8.3f  quality: %8.3f  interval: %.3f" % (msg.header.stamp.to_sec(), pose_corrected.order, pose_corrected.x, pose_corrected.y, pose_corrected.theta, pose_corrected.quality, (time() - self.time_prev_image)))
            self.time_prev_image = time()

    def updater(self):
        """Keep the node alive until ROS shuts down."""
        while not rospy.is_shutdown():
            self.rate.sleep()
    def __init__(self):
        """Initialize the MarkerLocator node, wire up ROS I/O, and spin.

        NOTE(review): this looks like a duplicate of the ``__init__`` of the
        tab-indented ``marker_locator_node`` class above — confirm which copy
        is actually used and remove the other.
        """
        rospy.init_node('MarkerLocator')

        self.rate = rospy.Rate(10)  # Hz; spin frequency of updater()

        # needed to convert ROS Image messages to OpenCV images
        self.bridge = CvBridge()

        # read topic names
        markerimage_sub_topic = rospy.get_param("~markerimage_sub",
                                                '/markerlocator/image_raw')
        markerpose_pub_topic = rospy.get_param("~markerpose_pub",
                                               '/markerlocator/markerpose')

        # read parameters
        self.show_image = rospy.get_param("~show_image", False)
        self.print_debug_messages = rospy.get_param("~print_debug_messages",
                                                    False)
        down_scale_factor = rospy.get_param("/image_downscale_factor", 1.0)
        self.marker_order = rospy.get_param("~marker_order", 0)
        # marker size is specified in full-resolution pixels; scale it to the
        # (possibly) downscaled input image
        marker_size = rospy.get_param("~marker_size", 0) / down_scale_factor

        # read the four calibration corners (a..d): world coordinates and the
        # corresponding image coordinates (image values are downscaled too).
        # One loop replaces four copy-pasted parameter-read blocks.
        calib_pts_world = []
        calib_pts_image = []
        for corner in ('a', 'b', 'c', 'd'):
            calib_pts_world.append([
                rospy.get_param("/calibrate_%s_world_x" % corner, 0),
                rospy.get_param("/calibrate_%s_world_y" % corner, 0)
            ])
            calib_pts_image.append([
                rospy.get_param("/calibrate_%s_image_x" % corner, 0) / down_scale_factor,
                rospy.get_param("/calibrate_%s_image_y" % corner, 0) / down_scale_factor
            ])

        # static parameters
        scaling_parameter = 1000  # only for (debug) plotting purposes

        # Calibration of setup, so that the coordinates correspond to real
        # world coordinates.
        self.perspective_corrector = PerspectiveCorrecter(
            calib_pts_image, calib_pts_world)
        if self.print_debug_messages:
            # print() with one pre-formatted string behaves identically under
            # Python 2 and 3 (replaces the Python-2-only print statements)
            print('Calibration points image: %s' % (calib_pts_image,))
            print('Calibration points world: %s' % (calib_pts_world,))

        # instantiate camera driver
        self.cd = CameraDriver(self.marker_order, marker_size,
                               scaling_parameter)
        if self.show_image:
            self.cd.open_image_window()

        # instantiate markerpose publisher
        self.markerpose_msg = markerpose()
        # NOTE(review): queue_size=0 means an infinite queue in rospy —
        # confirm this is intended (queue_size=1 would keep only the latest)
        self.markerpose_pub = rospy.Publisher(markerpose_pub_topic,
                                              markerpose,
                                              queue_size=0)

        # instantiate image subscriber
        self.time_prev_image = time()
        rospy.Subscriber(markerimage_sub_topic, Image, self.on_new_image)

        # call updater routine (blocks until shutdown)
        self.updater()
Пример #19
0
#!/usr/bin/python
# Standalone sanity check for the perspective calibration: converts pairs of
# pixel coordinates to world coordinates and compares their separation
# against a known reference distance.
from PerspectiveTransform import PerspectiveCorrecter
from math import hypot

# Four calibration points: pixel locations in the image and the matching
# real-world coordinates (same order). Units of the world coordinates are
# presumably centimetres — TODO confirm against the measurement setup.
pointLocationsInImage = [[952, 533], [946, 3], [1668, 3], [1667, 527]]
realCoordinates = [[0, 0], [0, 109], [150.1, 109], [150.1, 0]]
perspectiveConverter = PerspectiveCorrecter(pointLocationsInImage, realCoordinates)



# not image 14 in /home/exchizz/
# first measured pixel point (left)
px_pkt = (208,377)
meter_pktl = perspectiveConverter.convert(px_pkt)

# second measured pixel point (right)
px_pkt = (95,319)
meter_pktr = perspectiveConverter.convert(px_pkt)


# Euclidean distance between the two converted points; the expected
# separation is 50 (same world units) — TODO confirm the reference value.
distance = hypot(meter_pktl[0] - meter_pktr[0],meter_pktl[1] - meter_pktr[1])

print "Test 1 distance is: ", distance,  " error: ", 50-distance

# NOTE(review): exit() here makes everything below unreachable dead code.
exit()
# Image 15
px_pkt = (925,689)
meter_pktl = perspectiveConverter.convert(px_pkt)

px_pkt = (1246,839)
meter_pktr = perspectiveConverter.convert(px_pkt)