Example #1
def CreateMonoBag(imgs,bagname):
    '''Creates a bag file with camera images'''
    bag = rosbag.Bag(bagname, 'w')

    try:
        for i in range(len(imgs)):
            print("Adding %s" % imgs[i])
            fp = open(imgs[i], "rb")  # binary mode: image files are not text
            p = ImageFile.Parser()

            while 1:
                s = fp.read(1024)
                if not s:
                    break
                p.feed(s)

            im = p.close()

            Stamp = rospy.rostime.Time.from_sec(time.time())
            Img = Image()
            Img.header.stamp = Stamp
            Img.width = im.size[0]
            Img.height = im.size[1]
            Img.encoding = "rgb8"
            Img.header.frame_id = "camera"
            Img_data = [pix for pixdata in im.getdata() for pix in pixdata]
            Img.step = 3 * Img.width  # rgb8: three bytes per pixel per row
            Img.data = Img_data

            bag.write('camera/image_raw', Img, Stamp)
    finally:
        bag.close()       
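A minimal usage sketch (the directory path and bag name are hypothetical; it assumes the imports the snippet relies on: rosbag, rospy, time, PIL's ImageFile and sensor_msgs.msg.Image):

import glob
# Hypothetical input: pack every JPEG in ./frames into mono.bag.
CreateMonoBag(sorted(glob.glob('frames/*.jpg')), 'mono.bag')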
Example #2
def CreateMonoBag(imgs,bagname):
    '''Creates a bag file with camera images'''
    bag = rosbag.Bag(bagname, 'w')
    imgs = sorted(imgs)

    try:
        for i in range(len(imgs)):
            print("Adding %s" % imgs[i])
            fp = open(imgs[i], "rb")  # binary mode: image files are not text
            p = ImageFile.Parser()

            rgb_file = imgs[i]
            llim = rgb_file.rfind('/')
            rlim = rgb_file.rfind('.')
            rgb_ext = rgb_file[rlim:]
            msec = rgb_file[llim+1:rlim]
            sec = float(msec) / 1000 # msec to sec

            while 1:
                s = fp.read(1024)
                if not s:
                    break
                p.feed(s)

            im = p.close()

            # Stamp = rospy.rostime.Time.from_sec(time.time())
            Stamp = rospy.rostime.Time.from_sec(sec)
            Img = Image()
            Img.header.stamp = Stamp
            Img.width = im.size[0]
            Img.height = im.size[1]
            Img.encoding = "rgb8"
            Img.header.frame_id = "camera"
            Img_data = [pix for pixdata in im.getdata() for pix in pixdata]
            Img.step = 3 * Img.width  # rgb8: three bytes per pixel per row
            Img.data = Img_data

            bag.write('camera/rgb/image_color', Img, Stamp)

            #####
            d_file = rgb_file.replace(rgb_ext, '.txt')
            print("Adding %s" % d_file)
            fid = open(d_file, 'r')
            raw = fid.readlines()
            fid.close()
            #depth = numpy.reshape(raw, (im.size[1], im.size[0]))

            Img_depth = Image()
            Img_depth.header.stamp = Stamp
            Img_depth.width = im.size[0]
            Img_depth.height = im.size[1]
            Img_depth.encoding = "rgb8"
            Img_depth.header.frame_id = "camera"
            #Img_data = [pix for pixdata in im.getdata() for pix in pixdata]
            Img_depth.data = raw
            bag.write('camera/depth/image', Img_depth, Stamp)
    finally:
        bag.close()       
Example #3
 def appendMessages(self, stamp, messages):
     if self._image is not None:
         # Build the image message and push it on the message list
         msg = Image()
         msg.header.stamp = stamp
         msg.header.frame_id = self._frameId
         msg.width = self._image.shape[1]   # numpy shape is (rows, cols[, channels])
         msg.height = self._image.shape[0]
         if (len(self._image.shape) == 2) or (len(self._image.shape) == 3 and self._image.shape[2] == 1):
             # A gray image
             msg.encoding = '8UC1'
             stepMult = 1
         elif len(self._image.shape) == 3 and self._image.shape[2] == 3:
             # A color image
             msg.encoding = 'rgb8'
             stepMult = 3
         elif len(self._image.shape) == 3 and self._image.shape[2] == 4:
             # A color image with an alpha channel
             msg.encoding = 'rgba8'
             stepMult = 4
         else:
             raise RuntimeError("The parsing of images is very simple. " +\
                                "Only 3-channel rgb (rgb8), 4 channel rgba " +\
                                "(rgba8) and 1 channel mono (mono8) are " +\
                                "supported. Got an image with shape " +\
                                "{0}".format(self._image.shape))
         msg.is_bigendian = False
         msg.step = stepMult * msg.width
         msg.data = self._image.flatten().tolist()
         messages.append((self._topic, msg))
Example #4
def CreateStereoBag(left_imgs, right_imgs, bagname):
    '''Creates a bag file containing stereo image pairs'''
    bag = rosbag.Bag(bagname, 'w')

    try:
        for i in range(len(left_imgs)):
            print("Adding %s" % left_imgs[i])
            fp_left = open(left_imgs[i], "rb")
            p_left = ImageFile.Parser()

            while 1:
                s = fp_left.read(1024)
                if not s:
                    break
                p_left.feed(s)

            im_left = p_left.close()

            fp_right = open(right_imgs[i], "rb")
            print("Adding %s" % right_imgs[i])
            p_right = ImageFile.Parser()

            while 1:
                s = fp_right.read(1024)
                if not s:
                    break
                p_right.feed(s)

            im_right = p_right.close()

            Stamp = rospy.rostime.Time.from_sec(time.time())

            Img_left = Image()
            Img_left.header.stamp = Stamp
            Img_left.width = im_left.size[0]
            Img_left.height = im_left.size[1]
            Img_left.encoding = "rgb8"
            Img_left.header.frame_id = "camera/left"
            Img_left_data = [pix for pixdata in im_left.getdata() for pix in pixdata]
            Img_left.step = 3 * Img_left.width  # rgb8: three bytes per pixel
            Img_left.data = Img_left_data
            Img_right = Image()
            Img_right.header.stamp = Stamp
            Img_right.width = im_right.size[0]
            Img_right.height = im_right.size[1]
            Img_right.encoding = "rgb8"
            Img_right.header.frame_id = "camera/right"
            Img_right_data = [pix for pixdata in im_right.getdata() for pix in pixdata]
            Img_right.step = 3 * Img_right.width  # rgb8: three bytes per pixel
            Img_right.data = Img_right_data

            bag.write('camera/left/image_raw', Img_left, Stamp)
            bag.write('camera/right/image_raw', Img_right, Stamp)
    finally:
        bag.close()
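A similar hedged usage sketch for the stereo variant (the directory layout is hypothetical; the two lists must be index-aligned pairs):

import glob
left_imgs = sorted(glob.glob('left/*.png'))
right_imgs = sorted(glob.glob('right/*.png'))
CreateStereoBag(left_imgs, right_imgs, 'stereo.bag')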
Example #5
def cv_to_imgmsg(cvim, encoding = "passthrough"):
    try:
        return bridge.cv_to_imgmsg(cvim, encoding)
    except:
        img_msg = Image()
        (img_msg.width, img_msg.height) = cv.GetSize(cvim)
        if encoding == "passthrough":
            img_msg.encoding = bridge.cvtype_to_name[cv.GetElemType(cvim)]
        else:
            img_msg.encoding = encoding
            if encoding_as_cvtype(encoding) != cv.GetElemType(cvim):
                raise CvBridgeError("invalid encoding")
        img_msg.data = cvim.tostring()
        img_msg.step = len(img_msg.data) / img_msg.height
        return img_msg
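Example #6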
def convert(data):
	width=data.info.width
	height=data.info.height
	pixelList = [0] *width*height
	
	imageMsg=Image()
	imageMsg.header.stamp = rospy.Time.now()
	imageMsg.header.frame_id = '1'
	imageMsg.height = height
	imageMsg.width = width
	imageMsg.encoding = 'mono8'
	imageMsg.is_bigendian = 0
	imageMsg.step = width
	
	for h in range(height):
		for w in range(width):
			if data.data[h*width+w]==-1:
				pixelList[h*width+w] = 150
			elif data.data[h*width+w]==0:
				pixelList[h*width+w] = 0
			elif data.data[h*width+w]==100:
				pixelList[h*width+w] = 255	
			else:
				pixelList[h*width+w]=data.data[h*width+w]
				print 'ERROR'
				
	imageMsg.data = pixelList
	imagePub.publish(imageMsg)
Example #7
def airpub():
    pub = rospy.Publisher("airsim/image_raw", Image, queue_size=1)
    rospy.init_node('image_raw', anonymous=True)
    rate = rospy.Rate(10) # 10hz

    # connect to the AirSim simulator 
    client = airsim.MultirotorClient()
    client.confirmConnection()

    while not rospy.is_shutdown():
         # get camera images from the car
        responses = client.simGetImages([
            airsim.ImageRequest("1", airsim.ImageType.Scene, False, False)])  #scene vision image in uncompressed RGBA array

        for response in responses:
            img_rgba_string = response.image_data_uint8

        # Populate image message
        msg=Image() 
        msg.header.stamp = rospy.Time.now()
        msg.header.frame_id = "frameId"
        msg.encoding = "rgba8"
        msg.height = 360  # resolution should match values in settings.json 
        msg.width = 640
        msg.data = img_rgba_string
        msg.is_bigendian = 0
        msg.step = msg.width * 4

        # log time and size of published image
        rospy.loginfo(len(response.image_data_uint8))
        # publish image message
        pub.publish(msg)
        # sleep until next cycle
        rate.sleep()
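Example #8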
 def publish(self):
     # Get the image.
     image = self.__videoDeviceProxy.getImageRemote(self.__videoDeviceProxyName)

     # Create Image message.
     ImageMessage = Image()
     ImageMessage.header.stamp.secs = image[5]
     ImageMessage.width = image[0]
     ImageMessage.height = image[1]
     ImageMessage.step = image[2] * image[0]
     ImageMessage.is_bigendian = False
     ImageMessage.encoding = 'bgr8'
     ImageMessage.data = image[6]

     self.__imagePublisher.publish(ImageMessage)

     # Create CameraInfo message.
     # Data from the calibration phase is hard coded for now.
     CameraInfoMessage = CameraInfo()
     CameraInfoMessage.header.stamp.secs = image[5]
     CameraInfoMessage.width = image[0]
     CameraInfoMessage.height = image[1]
     CameraInfoMessage.D = [-0.0769218451517258, 0.16183180613612602, 0.0011626049774280595, 0.0018733894100460534, 0.0]
     CameraInfoMessage.K = [581.090096189648, 0.0, 341.0926325830606, 0.0, 583.0323248080421, 241.02441593704128, 0.0, 0.0, 1.0]
     CameraInfoMessage.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
     CameraInfoMessage.P = [580.5918359588198, 0.0, 340.76398441334106, 0.0, 0.0, 582.4042541534321, 241.04182225157172, 0.0, 0.0, 0.0, 1.0, 0.0]
     CameraInfoMessage.distortion_model = 'plumb_bob'

     #CameraInfoMessage.roi.x_offset = self.__ROIXOffset
     #CameraInfoMessage.roi.y_offset = self.__ROIYOffset
     #CameraInfoMessage.roi.width = self.__ROIWidth
     #CameraInfoMessage.roi.height = self.__ROIHeight
     #CameraInfoMessage.roi.do_rectify = self.__ROIDoRectify

     self.__cameraInfoPublisher.publish(CameraInfoMessage)
Example #9
    def default(self, ci="unused"):
        if not self.component_instance.capturing:
            return  # press [Space] key to enable capturing

        image_local = self.data["image"]

        image = Image()
        image.header = self.get_ros_header()
        image.header.frame_id += "/base_image"
        image.height = self.component_instance.image_height
        image.width = self.component_instance.image_width
        image.encoding = "rgba8"
        image.step = image.width * 4

        # VideoTexture.ImageRender implements the buffer interface
        image.data = bytes(image_local)

        # fill in these three parameters to get a correct image with a stereo camera
        Tx = 0
        Ty = 0
        R = [1, 0, 0, 0, 1, 0, 0, 0, 1]

        intrinsic = self.data["intrinsic_matrix"]

        camera_info = CameraInfo()
        camera_info.header = image.header
        camera_info.height = image.height
        camera_info.width = image.width
        camera_info.distortion_model = "plumb_bob"
        camera_info.K = [
            intrinsic[0][0],
            intrinsic[0][1],
            intrinsic[0][2],
            intrinsic[1][0],
            intrinsic[1][1],
            intrinsic[1][2],
            intrinsic[2][0],
            intrinsic[2][1],
            intrinsic[2][2],
        ]
        camera_info.R = R
        camera_info.P = [
            intrinsic[0][0],
            intrinsic[0][1],
            intrinsic[0][2],
            Tx,
            intrinsic[1][0],
            intrinsic[1][1],
            intrinsic[1][2],
            Ty,
            intrinsic[2][0],
            intrinsic[2][1],
            intrinsic[2][2],
            0,
        ]

        self.publish(image)
        self.topic_camera_info.publish(camera_info)
Example #10
def numpy_to_imgmsg(image, stamp=None):
    from sensor_msgs.msg import Image 
    rosimage = Image()
    rosimage.height = image.shape[0]
    rosimage.width = image.shape[1]
    if image.dtype == np.uint8:
        rosimage.encoding = '8UC%d' % image.shape[2]
        rosimage.step = image.shape[2] * rosimage.width
        rosimage.data = image.ravel().tolist()
    else:
        rosimage.encoding = '32FC%d' % image.shape[2]
        rosimage.step = image.shape[2] * rosimage.width * 4
        rosimage.data = np.array(image.flat, dtype=np.float32).tostring()
    if stamp is not None:
        rosimage.header.stamp = stamp
    return rosimage
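A minimal usage sketch for numpy_to_imgmsg, assuming a node has already been initialized so rospy.Time.now() is valid (the test image is illustrative only):

import numpy as np
import rospy
frame = np.zeros((480, 640, 3), dtype=np.uint8)  # uint8 selects the '8UC3' branch above
msg = numpy_to_imgmsg(frame, stamp=rospy.Time.now())

Example #11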
def copy_Image_empty_data(image_old):
    image_new = Image()
    image_new.header = copy(image_old.header)
    image_new.height = copy(image_old.height)
    image_new.width = copy(image_old.width)
    image_new.encoding = copy(image_old.encoding)
    image_new.is_bigendian = copy(image_old.is_bigendian)
    image_new.step = copy(image_old.step)
    return image_new
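Example #12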
    def callback(self, data):
        #print "got some shit coming in"
        #if data is not None:
            #plane= data.pose.position
            #nrm= norm([plane.x, plane.y, plane.z])

            #normal= np.array([plane.x, plane.y, plane.z])/nrm

            #print "got here"

            ##replace with numpy array
            #plane= np.array([plane.x, plane.y, plane.z])

            ##get the rotation matrix
            #rmatrix= rotationMatrix(normal)

            ##print rmatrix

            ##for point in data.points:
                ##print point
                ##p= np.array([point.x, point.y, point.z])
                ##flattened_point= project(p, plane, normal)
                ##print flattened_point
                ##print np.dot(rmatrix,flattened_point)

        try:
            resp= self.seperation()
            print resp.result
            #self.pub.publish(resp.clusters[0])
            im= Image()
            im.header.seq= 72
            im.header.stamp.secs= 1365037570
            im.header.stamp.nsecs= 34077284
            im.header.frame_id= '/camera_rgb_optical_frame'
            im.height= 480
            im.width= 640
            im.encoding= '16UC1'
            im.is_bigendian= 0
            im.step= 1280
            im.data= [100 for n in xrange(1280*480)]

            for point in resp.clusters[0].points:
                x = int(point.x * 640)  # list indices must be integers
                y = int(point.y * 480)
                im.data[y*1280 + x] = 10

            pub_image.publish(im)

        except Exception, e:
            print "service call failed"
            print e
Example #13
    def process_frame(self,cam_id,buf,buf_offset,timestamp,framenumber):
        if have_ROS:
            msg = Image()
            msg.header.seq=framenumber
            msg.header.stamp=rospy.Time.from_sec(timestamp)
            msg.header.frame_id = "0"

            npbuf = np.array(buf)
            (height,width) = npbuf.shape

            msg.height = height
            msg.width = width
            msg.encoding = self.encoding
            msg.step = width
            msg.data = npbuf.tostring() # let numpy convert to string

            with self.publisher_lock:
                cam_info = self.camera_info
                cam_info.header.stamp = msg.header.stamp
                cam_info.header.seq = msg.header.seq
                cam_info.header.frame_id = msg.header.frame_id
                cam_info.width = width
                cam_info.height = height

                self.publisher.publish(msg)
                self.publisher_cam_info.publish(cam_info)
        return [],[]
Example #14
    def __init__(self):
        rospy.init_node('image_publish')
        name = sys.argv[1]
        image = cv2.imread(name)
        #cv2.imshow("im", image)
        #cv2.waitKey(5)

        hz = rospy.get_param("~rate", 1)
        frame_id = rospy.get_param("~frame_id", "map")
        use_image = rospy.get_param("~use_image", True)
        rate = rospy.Rate(hz)

        self.ci_in = None
        ci_sub = rospy.Subscriber('camera_info_in', CameraInfo,
                                  self.camera_info_callback, queue_size=1)

        if use_image:
            pub = rospy.Publisher('image', Image, queue_size=1)
        ci_pub = rospy.Publisher('camera_info', CameraInfo, queue_size=1)

        msg = Image()
        msg.header.stamp = rospy.Time.now()
        msg.header.frame_id = frame_id
        msg.encoding = 'bgr8'
        msg.height = image.shape[0]
        msg.width = image.shape[1]
        msg.step = image.shape[1] * 3
        msg.data = image.tostring()
        if use_image:
            pub.publish(msg)

        ci = CameraInfo()
        ci.header = msg.header
        ci.height = msg.height
        ci.width = msg.width
        ci.distortion_model = "plumb_bob"
        # TODO(lucasw) need a way to set these values- have this node
        # subscribe to an input CameraInfo?
        ci.D = [0.0, 0.0, 0.0, 0, 0]
        ci.K = [500.0, 0.0, msg.width/2, 0.0, 500.0, msg.height/2, 0.0, 0.0, 1.0]
        ci.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        ci.P = [500.0, 0.0, msg.width/2, 0.0, 0.0, 500.0, msg.height/2, 0.0,  0.0, 0.0, 1.0, 0.0]
        # ci_pub.publish(ci)

        # TODO(lwalter) only run this if hz is positive,
        # otherwise wait for input trigger message to publish an image
        while not rospy.is_shutdown():
            if self.ci_in is not None:
                ci = self.ci_in

            msg.header.stamp = rospy.Time.now()
            ci.header = msg.header
            if use_image:
                pub.publish(msg)
            ci_pub.publish(ci)

            if hz <= 0:
                rospy.sleep(1.0)  # rospy.sleep needs a duration; idle briefly between publishes
            else:
                rate.sleep()
Example #15
    def main_loop(self):
        img = Image()
        while not rospy.is_shutdown():
            #print "getting image..",
            image = self.camProxy.getImageRemote(self.nameId)
            #print "ok"
            # TODO: better time
            img.header.stamp = rospy.Time.now()
            img.header.frame_id = self.frame_id
            img.height = image[1]
            img.width = image[0]
            nbLayers = image[2]
            #colorspace = image[3]
            if image[3] == kYUVColorSpace:
                encoding = "mono8"
            elif image[3] == kRGBColorSpace:
                encoding = "rgb8"
            elif image[3] == kBGRColorSpace:
                encoding = "bgr8"
            else:
                rospy.logerr("Received unknown encoding: {0}".format(image[3]))

            img.encoding = encoding
            img.step = img.width * nbLayers
            img.data = image[6]
            self.info_.width = img.width
            self.info_.height = img.height
            self.info_.header = img.header
            self.pub_img_.publish(img)
            self.pub_info_.publish(self.info_)
            rospy.sleep(0.0001)# TODO: is this necessary?


        self.camProxy.unsubscribe(self.nameId)
Example #16
def post_image(self, component_instance):
    """ Publish the data of the Camera as a ROS-Image message.

    """
    image_local = component_instance.local_data['image']
    if not image_local or image_local == '' or not image_local.image or not component_instance.capturing:
        return # press [Space] key to enable capturing

    parent_name = component_instance.robot_parent.blender_obj.name

    image = Image()
    image.header.stamp = rospy.Time.now()
    image.header.seq = self._seq
    # http://www.ros.org/wiki/geometry/CoordinateFrameConventions#Multi_Robot_Support
    image.header.frame_id = ('/' + parent_name + '/base_image')
    image.height = component_instance.image_height
    image.width = component_instance.image_width
    image.encoding = 'rgba8'
    image.step = image.width * 4
    # NOTE: Blender returns the image as a binary string encoded as RGBA
    # sensor_msgs.msg.Image.image need to be len() friendly
    # TODO patch ros-py3/common_msgs/sensor_msgs/src/sensor_msgs/msg/_Image.py
    # to be C-PyBuffer "aware" ? http://docs.python.org/c-api/buffer.html
    image.data = bytes(image_local.image)
    # RGBA8 -> RGB8 ? (remove alpha channel, save h*w bytes, CPUvore ?)
    # http://wiki.blender.org/index.php/Dev:Source/GameEngine/2.49/VideoTexture
    # http://www.blender.org/documentation/blender_python_api_2_57_release/bge.types.html#bge.types.KX_Camera.useViewport

    for topic in self._topics:
        # publish the message on the correct topic
        if str(topic.name) == str("/" + parent_name + "/" + component_instance.blender_obj.name):
            topic.publish(image)

    self._seq = self._seq + 1
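Example #17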
	def publishCombined(self):
		#Enter Main Loop
		while not rospy.is_shutdown():
			
			#Convert to Numpy Arrays
			maps = []
			for i in range(0, self.numRobots):
				maps.append(numpy.array(self.searchedData[i].data))

			combined2 = maps[0]
			if self.numRobots > 1:
				#Find Minimum of all maps
				for i in range(1, self.numRobots):
					combined2 = numpy.minimum(combined2, maps[i])
					
			#Pack Occupancy Grid Message
			mapMsg=OccupancyGrid()
			mapMsg.header.stamp=rospy.Time.now()
			mapMsg.header.frame_id=self.mapData.header.frame_id
			mapMsg.info.resolution=self.mapData.info.resolution
			mapMsg.info.width=self.mapData.info.width
			mapMsg.info.height=self.mapData.info.height
			mapMsg.info.origin=self.mapData.info.origin
			mapMsg.data=combined2.tolist()
			
			#Convert combined Occupancy grid values to grayscale image values
			combined2[combined2 == -1] = 150			#Unknown -1->150 		(gray)
			combined2[combined2 == 100] = 255			#Not_Searched 100->255	(white)
														#Searched=0				(black)
														
			#Calculate percentage of open area searched
			numNotSearched = combined2[combined2==255].size
			numSearched = combined2[combined2==0].size
			percentSearched = 100*float(numSearched)/(numNotSearched+numSearched)
			percentSearchedMsg = Float32()
			percentSearchedMsg.data = percentSearched
			self.percentPub.publish(percentSearchedMsg)
			
			#Pack Image Message
			imageMsg=Image()
			imageMsg.header.stamp = rospy.Time.now()
			imageMsg.header.frame_id = self.mapData.header.frame_id
			imageMsg.height = self.mapData.info.height
			imageMsg.width = self.mapData.info.width
			imageMsg.encoding = 'mono8'
			imageMsg.is_bigendian = 0
			imageMsg.step = self.mapData.info.width
			imageMsg.data = combined2.tolist()
			
			#Publish Combined Occupancy Grid and Image
			self.searchedCombinePub.publish(mapMsg)
			self.imagePub.publish(imageMsg)
			
			#Update every 1.0 seconds
			rospy.sleep(1.0)
Example #18
    def main_loop(self):
        img = Image()
        r = rospy.Rate(self.fps)
        while not rospy.is_shutdown():
            image = self.camProxy.getImageRemote(self.nameId)
            stampAL = image[4] + image[5]*1e-6
            #print image[5],  stampAL, "%lf"%(stampAL)
            img.header.stamp = rospy.Time(stampAL)
            img.header.frame_id = self.frame_id
            img.height = image[1]
            img.width = image[0]
            nbLayers = image[2]
            if image[3] == kYUVColorSpace:
                encoding = "mono8"
            elif image[3] == kRGBColorSpace:
                encoding = "rgb8"
            elif image[3] == kBGRColorSpace:
                encoding = "bgr8"
            elif image[3] == kYUV422ColorSpace:
                encoding = "yuv422" # this works only in ROS groovy and later
            else:
                rospy.logerr("Received unknown encoding: {0}".format(image[3]))

            img.encoding = encoding
            img.step = img.width * nbLayers
            img.data = image[6]
            infomsg = self.cim.getCameraInfo()
            infomsg.header = img.header
            self.pub_info_.publish(infomsg)
            self.pub_img_.publish(img)
            r.sleep()


        self.camProxy.unsubscribe(self.nameId)
Example #19
 def publish_image(self):
     # get the image from the Nao
     img = self.nao_cam.getImageRemote(self.proxy_name)
     # copy the data into the ROS Image
     ros_img = Image()
     ros_img.width = img[0]
     ros_img.height = img[1]
     ros_img.step = img[2] * img[0]
     ros_img.header.stamp.secs = img[5]
     ros_img.is_bigendian = False
     ros_img.encoding = "rgb8"
     ros_img.data = img[6]
     # publish the image
     self.nao_cam_pub.publish(ros_img)
Example #20
 def show_diffeomorphisms(self):
     for i in range(len(self.estimators)): #estr in self.estimators:
         D = self.estimators[i].summarize()
         cmd = self.command_list[i]
         save_path = '/home/adam/'
         #Image.fromarray(diffeo_to_rgb_angle(D.d)).show()
         Image.fromarray(diffeo_to_rgb_angle(D.d)).save(save_path+'cmd'+str(cmd).replace(' ','')+'angle.png')
         Image.fromarray(diffeo_to_rgb_norm(D.d)).save(save_path+'cmd'+str(cmd).replace(' ','')+'norm.png')
         Image.fromarray((D.variance*255).astype(np.uint8)).save(save_path+'cmd'+str(cmd).replace(' ','')+'variance.png')
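Example #21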
 def OccupancyGridToNavImage(self, grid_map):
     img = Image()
     img.header = grid_map.header
     img.height = grid_map.info.height
     img.width = grid_map.info.width
     img.is_bigendian = 1
     img.step = grid_map.info.width
     img.encoding = "mono8"
     maxindex = img.height * img.width
     data = []
     for i in range(0, maxindex, 1):
         # threshold occupancy values: free space -> black, everything else -> white
         if int(grid_map.data[i]) < 20:
             data.append(0)
         else:
             data.append(255)
     img.data = data
     return img
Example #22
    def main_loop(self):
        img = Image()
        while not rospy.is_shutdown():
            #print "getting image..",
            images = self.camProxy.getImagesRemote(self.nameId)
            #print "ok"
            # TODO: better time
            for i in [0, 1]:
                #print len(images[i])
                image = images[i]
                img.header.stamp = rospy.Time.now()
                if image[7] == 0:
                    img.header.frame_id = "/CameraTop_frame"
                elif image[7] == 1:
                    img.header.frame_id = "/CameraBottom_frame"
                img.height = image[1]
                img.width = image[0]
                nbLayers = image[2]
                #colorspace = image[3]
                if image[3] == kYUVColorSpace:
                    encoding = "mono8"
                elif image[3] == kRGBColorSpace:
                    encoding = "rgb8"
                elif image[3] == kBGRColorSpace:
                    encoding = "bgr8"
                else:
                    rospy.logerr("Received unknown encoding: {0}".format(image[3]))

                img.encoding = encoding
                img.step = img.width * nbLayers
                if len(images) >= 2:
                    # both camera buffers are packed into images[2]; take this camera's half
                    img.data = images[2][len(images[2])/2 * i:len(images[2])/2 * (i+1) + 1]
                else:
                    img.data = []
                    print "image with no data"
                self.info_[i].width = img.width
                self.info_[i].height = img.height
                self.info_[i].header = img.header
                self.pub_img_[i].publish(img)
                self.pub_info_[i].publish(self.info_[i])

            self.camProxy.releaseImages(self.nameId)

        self.camProxy.unsubscribe(self.nameId)
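Example #23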
def publish_state_image(state_from_env1, current_state_image_pub):
    current_state_image_msg = Image()
    current_state_image_msg.encoding = "mono8"
    current_state_image_msg.header.stamp = rospy.Time.now()
    current_state_image_msg.height = 68
    current_state_image_msg.width = 34
    current_state_image_msg.step = 34  # mono8: one byte per pixel, so step equals width
    x = np.reshape(state_from_env1, (2, 2312))
    idx_x = np.argwhere(x[0] == np.amax(x[0]))
    lx = idx_x.flatten().tolist()
    x[1][lx[0]] = 255
    x[1][lx[1]] = 255
    x[1][lx[2]] = 255
    y = x[1].tolist()
    current_state_image_msg.data = y
    current_state_image_pub.publish(current_state_image_msg)
Example #24
    def default(self, ci='unused'):
        if not self.component_instance.capturing:
            return # press [Space] key to enable capturing

        image_local = self.data['image']

        image = Image()
        image.header = self.get_ros_header()
        image.height = self.component_instance.image_height
        image.width = self.component_instance.image_width
        image.encoding = self.encoding
        image.step = image.width * 4

        # VideoTexture.ImageRender implements the buffer interface
        image.data = bytes(image_local)

        # fill in these three parameters to get a correct image with a stereo camera
        Tx = 0
        Ty = 0
        R = [1, 0, 0, 0, 1, 0, 0, 0, 1]

        intrinsic = self.data['intrinsic_matrix']

        camera_info = CameraInfo()
        camera_info.header = image.header
        camera_info.height = image.height
        camera_info.width = image.width
        camera_info.distortion_model = 'plumb_bob'
        camera_info.D = [0]
        camera_info.K = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2],
                         intrinsic[1][0], intrinsic[1][1], intrinsic[1][2],
                         intrinsic[2][0], intrinsic[2][1], intrinsic[2][2]]
        camera_info.R = R
        camera_info.P = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2], Tx,
                         intrinsic[1][0], intrinsic[1][1], intrinsic[1][2], Ty,
                         intrinsic[2][0], intrinsic[2][1], intrinsic[2][2], 0]

        if self.pub_tf:
            self.publish_with_robot_transform(image)
        else:
            self.publish(image)
        self.topic_camera_info.publish(camera_info)
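Example #25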
def array_to_image(array):
    """Takes a NxMx3 array and converts it into a ROS image message.

    """
    # Sanity check the input array shape
    if len(array.shape) != 3 or array.shape[2] != 3:
        raise ValueError('Array must have shape NxMx3')

    # Ravel the array into a single buffer
    image_data = (array.astype(np.uint8)).tostring(order='C')

    # Create the image message
    image_msg = Image()
    image_msg.height = array.shape[0]
    image_msg.width = array.shape[1]
    image_msg.encoding = 'rgb8'
    image_msg.is_bigendian = 0
    image_msg.step = array.shape[1] * 3
    image_msg.data = image_data

    return image_msg
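A short usage sketch (the node and topic names are hypothetical):

import numpy as np
import rospy
from sensor_msgs.msg import Image
rospy.init_node('array_image_demo')
pub = rospy.Publisher('image', Image, queue_size=1)
pub.publish(array_to_image(np.zeros((480, 640, 3), dtype=np.uint8)))

Example #26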
def main():
    pub = rospy.Publisher('image_maker', Image, queue_size=10)
    rospy.init_node('image_maker')
    #rospy.wait_for_service('tabletop_segmentation')
    im= Image()
    im.header.seq= 72
    im.header.stamp.secs= 1365037570
    im.header.stamp.nsecs= 34077284
    im.header.frame_id= '/camera_rgb_optical_frame'
    im.height= 480
    im.width= 640
    im.encoding= '16UC1'
    im.is_bigendian= 0
    im.step= 1280
    im.data= [100 for n in xrange(1280*480)]

    while not rospy.is_shutdown():
        try:
            pub.publish(im)
            sleep(.5)

        except Exception, e:
            print e
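Example #27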
def main():
  if len(sys.argv) < 2:
    print("Usage: {} dataset_name".format(sys.argv[0]))
    exit(1)

  file_name = sys.argv[1]
  log_file = h5py.File('../dataset/log/{}.h5'.format(file_name))
  camera_file = h5py.File('../dataset/camera/{}.h5'.format(file_name))  

  zipped_log = izip(
    log_file['times'],
    log_file['fiber_accel'],
    log_file['fiber_gyro'])

  with rosbag.Bag('{}.bag'.format(file_name), 'w') as bag:
    bar = Bar('Camera', max=len(camera_file['X']))
    for i, img_data in enumerate(camera_file['X']):
      m_img = Image()
      m_img.header.stamp = rospy.Time.from_sec(0.01 * i)
      m_img.height = img_data.shape[1]
      m_img.width = img_data.shape[2]
      m_img.step = 3 * img_data.shape[2]
      m_img.encoding = 'rgb8'
      m_img.data = np.transpose(img_data, (1, 2, 0)).flatten().tolist()
      
      bag.write('/camera/image_raw', m_img, m_img.header.stamp)
      bar.next()
      
    bar.finish()

    bar = Bar('IMU', max=len(log_file['times']))
    for time, v_accel, v_gyro in zipped_log:
      m_imu = Imu()
      m_imu.header.stamp = rospy.Time.from_sec(time)
      [setattr(m_imu.linear_acceleration, c, v_accel[i]) for i, c in enumerate('xyz')]
      [setattr(m_imu.angular_velocity, c, v_gyro[i]) for i, c in enumerate('xyz')]

      bag.write('/fiber_imu', m_imu, m_imu.header.stamp)
      bar.next()

    bar.finish()
Example #28
def GetImageFromFile(im_path):
	fp = open(im_path, "rb")  # binary mode for image data
	p = ImageFile.Parser()
	while 1:
		s = fp.read(307218) # trying to read a full file in one shot ...
		if not s:
		    break
		p.feed(s)
	im = p.close() # we should now have an image object
	im_stamp = os.path.basename(im_path).split(".")[0] #image timestamp is directly encoded in file name
	im_stamp =  float(im_stamp)/1000000.0
	Stamp = rospy.rostime.Time.from_sec(im_stamp)
	Img = Image()
	Img.header.stamp = Stamp
	Img.width = im.size[0]
	Img.height = im.size[1]
	Img.encoding = "mono8" #needs to be changed to rgb8 for rgb images
	Img.step = Img.width  # mono8: one byte per pixel; some nodes complain if step is unset
	Img.header.frame_id = "camera"
	Img_data = list(im.getdata()) #works for mono channels images (grayscale)
	#Img_data = [pix for pix in im.getdata()]
	#  Img_data = [pix for pixdata in im.getdata() for pix in pixdata]
	Img.data = Img_data
	return (im_stamp, Stamp, Img)
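A hedged usage sketch (the microsecond-stamped filename is hypothetical but matches the basename parsing above):

im_stamp, Stamp, Img = GetImageFromFile('frames/1305031102175304.png')
print(im_stamp)  # seconds recovered from the file name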
Example #29
def numpy_to_image(arr, encoding):
	if encoding not in name_to_dtypes:
		raise TypeError('Unrecognized encoding {}'.format(encoding))

	im = Image(encoding=encoding)

	# extract width, height, and channels
	dtype_class, exp_channels = name_to_dtypes[encoding]
	dtype = np.dtype(dtype_class)
	if len(arr.shape) == 2:
		im.height, im.width, channels = arr.shape + (1,)
	elif len(arr.shape) == 3:
		im.height, im.width, channels = arr.shape
	else:
		raise TypeError("Array must be two or three dimensional")

	# check type and channels
	if exp_channels != channels:
		raise TypeError("Array has {} channels, {} requires {}".format(
			channels, encoding, exp_channels
		))
	if dtype_class != arr.dtype.type:
		raise TypeError("Array is {}, {} requires {}".format(
			arr.dtype.type, encoding, dtype_class
		))

	# make the array contiguous in memory, as mostly required by the format
	contig = np.ascontiguousarray(arr)
	im.data = contig.tostring()
	im.step = contig.strides[0]
	im.is_bigendian = (
		arr.dtype.byteorder == '>' or 
		arr.dtype.byteorder == '=' and sys.byteorder == 'big'
	)

	return im
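numpy_to_image depends on a name_to_dtypes table that is not shown above; a minimal sketch of such a mapping, assuming the usual encoding/dtype pairs (a full table, e.g. the one in ros_numpy, covers many more encodings):

import numpy as np
# encoding name -> (numpy dtype, expected channel count); illustrative subset only
name_to_dtypes = {
    'mono8': (np.uint8, 1),
    'rgb8': (np.uint8, 3),
    'bgr8': (np.uint8, 3),
    'rgba8': (np.uint8, 4),
    '16UC1': (np.uint16, 1),
    '32FC1': (np.float32, 1),
}

Example #30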
import cv2
import math
import rospy
from geometry_msgs.msg import Twist
from kobuki_msgs.msg import Led
from kobuki_msgs.msg import ButtonEvent
from kobuki_msgs.msg import BumperEvent
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from cmvision.msg import Blobs, Blob
from struct import unpack
import copy

blobsInfo = Blobs()
isBlobsInfoReady = False
depthImage = Image()
isDepthImageReady = False
colorImage = Image()
isColorImageReady = False
xLocation = 320
yLocation = 240
pub = rospy.Publisher('kobuki_command', Twist, queue_size=10)
command = Twist()
bumper = True
pub1 = rospy.Publisher('/mobile_base/commands/led1', Led, queue_size=10)
pub2 = rospy.Publisher('/mobile_base/commands/led2', Led, queue_size=10)
led = Led()


def mouseClick(event, x, y, flags, param):
    global xLocation, yLocation
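Example #31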
	def __init__(self):
		'''
		This class is used to transmit data from the MORSE node to a transparent node used in the ROS airship; you will have to modify it.
		'''
		# Get the ~private namespace parameters from command line or launch file.
		init_message = rospy.get_param('~message', 'hello')
		self.rate = float(rospy.get_param('~rate', '30.0'))
		self.rate_before_sleep = self.rate
		self.topic_root = rospy.get_param('~topic', 'odometry')
		self.topic_imu = rospy.get_param('~topic_imu', 'imu')
		self.topic_hardcom = rospy.get_param('~topic_hardcom', 'hardcom')
		self.topic_camcom = rospy.get_param('~topic_camcom', 'cameracom')
		self.topic_command = rospy.get_param('~topic_command', 'command')
		rospy.loginfo("airship/" + self.topic_root + "/rate = " + str(self.rate) + "Hz")

		#Defining published messages
		'''
		RANGE FIELDS:
		Header header
		uint8 ULTRASOUND=0
		uint8 INFRARED=1
		uint8 radiation_type
		float32 field_of_view
		float32 min_range
		float32 max_range
		float32 range
		'''
		self.range = Range()

		'''
		IMU FIELDS:
		Header header

		geometry_msgs/Quaternion orientation
		float64[9] orientation_covariance # Row major about x, y, z axes

		geometry_msgs/Vector3 angular_velocity
		float64[9] angular_velocity_covariance # Row major about x, y, z axes

		geometry_msgs/Vector3 linear_acceleration
		float64[9] linear_acceleration_covariance # Row major x, y z 
		'''
		self.imu = Imu()

		'''
		IMAGE FIELDS
		Header header        

		uint32 height         # image height, that is, number of rows
		uint32 width          # image width, that is, number of columns

		string encoding       # Encoding of pixels -- channel meaning, ordering, size
		                      # taken from the list of strings in include/sensor_msgs/image_encodings.h
		uint8 is_bigendian    # is this data bigendian?
		uint32 step           # Full row length in bytes
		uint8[] data          # actual matrix data, size is (step * rows)
		'''
		self.image = Image()

		self.camera_info = CameraInfo()

		'''
		ODOMETRY FIELDS:
		std_msgs/Header header
		  uint32 seq
		  time stamp
		  string frame_id
		string child_frame_id
		geometry_msgs/PoseWithCovariance pose
		  geometry_msgs/Pose pose
		    geometry_msgs/Point position
		      float64 x
		      float64 y
		      float64 z
		    geometry_msgs/Quaternion orientation
		      float64 x
		      float64 y
		      float64 z
		      float64 w
		  float64[36] covariance
		geometry_msgs/TwistWithCovariance twist
		  geometry_msgs/Twist twist
		    geometry_msgs/Vector3 linear
		      float64 x
		      float64 y
		      float64 z
		    geometry_msgs/Vector3 angular
		      float64 x
		      float64 y
		      float64 z
		  float64[36] covariance

		'''
		self.estimated_pose = Odometry()

		'''
		Header header
  			uint32 seq
  			time stamp
  			string frame_id
		geometry_msgs/Pose pose
  		geometry_msgs/Point position
    			float64 x
    			float64 y
    			float64 z
  		geometry_msgs/Quaternion orientation
    			float64 x
    			float64 y
    			float64 z
    			float64 w
		'''
		self.real_pose = PoseStamped()
		self.is_sleep_state = False

		#Defining the subscriber
		rospy.Subscriber(self.topic_hardcom+"/LidarRange", Range, self.call_range)
		rospy.Subscriber(self.topic_imu+"/InertialData", Imu, self.call_imu)
		rospy.Subscriber(self.topic_camcom+"/CameraImg", Image, self.call_image)
		rospy.Subscriber(self.topic_camcom+"/CameraInfo", CameraInfo, self.call_camera_info)
		rospy.Subscriber(self.topic_command+"/isSleep", Bool, self.call_sleep)

		rospy.Subscriber("raw/pose", PoseStamped, self.call_pose)

		#Defining all the publisher
		self.estimated_pose_pub = rospy.Publisher(self.topic_root+"/EstimatedPose", Odometry, queue_size=10)

		while not rospy.is_shutdown():
			if not self.is_sleep_state:
				#######################################
				# This is where we can compute the estimated
				# pose
				#######################################

				self.estimated_pose_pub.publish(self.estimated_pose)

				#this is the rate of the publishment.
				if self.rate:
					rospy.sleep(1/self.rate)
				else:
					rospy.sleep(1.0)

		self.publish_onShutdown()		
Example #32
def main():
    #Coordinates
    global coordinates_x
    global coordinates_y
    coordinates_x = [0]
    coordinates_y = [0]
    global xCoordinate
    global yCoordinate
    global resize
    global resize_width, resize_height
    global mean_x
    global mean_y

    #Start Timer
    stopFlag = Event()
    thread = LowPassFilter(stopFlag)
    thread.start()

    #Video Path
    #video_path = '' #Enter any video path relative to the script file

    #Threshold for Green in HSV Color Space
    greenLower = (40, 58, 77)  # less accurate -> (29,86,6); used: (29,50,150)
    greenUpper = (73, 255, 255)  # used: (64,255,255)

    #Detection in real time
    camera = cv2.VideoCapture(0)

    bridge = CvBridge()

    #Detection over video
    #camera = cv2.VideoCapture("video path")
    count = 0
    count2 = 0
    old_x = 0
    old_y = 0
    difference_x = mean_x - old_x
    difference_y = mean_y - old_y
    old_difference_x = 0
    old_difference_y = 0
    ball_detected = False

    while not rospy.is_shutdown():  # for detection over a video, use 'camera.isOpened()' as the condition instead

        #Read Frame
        ret, frame = camera.read()
        height, width = frame.shape[:2]  # frame = frame[0:height, 0:int(width*0.5)]

        # Resize and Add Noise
        if resize == True:
            frame = imutils.resize(frame,
                                   width=resize_width,
                                   height=resize_height)

        # blurring the frame that's captured
        frame_gau_blur = cv2.GaussianBlur(frame, (3, 3), 1)

        hsv = cv2.cvtColor(frame_gau_blur, cv2.COLOR_BGR2HSV)
        green_range = cv2.inRange(hsv, greenLower, greenUpper)
        res_green = cv2.bitwise_and(frame_gau_blur,
                                    frame_gau_blur,
                                    mask=green_range)

        # Masking
        mask = cv2.erode(res_green, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=3)
        #mask = cv2.GaussianBlur(mask, (5, 5), 0)

        blue_s_gray = cv2.cvtColor(res_green, cv2.COLOR_BGR2GRAY)
        canny_edge = cv2.Canny(blue_s_gray, 200, 210)  #100,110
        # applying HoughCircles
        rows = blue_s_gray.shape[0]
        circles = cv2.HoughCircles(canny_edge,
                                   cv2.HOUGH_GRADIENT,
                                   dp=2,
                                   minDist=9,
                                   param1=10,
                                   param2=5,
                                   minRadius=0,
                                   maxRadius=0)
        cnts = cv2.findContours(blue_s_gray.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
        #cnts = imutils.grab_contours(cnts)
        center = None

        #Execute only at least one contour found
        if len(cnts) > 0 and circles is not None:
            difference_x = mean_x - old_x
            difference_y = mean_y - old_y
            if (-5 < difference_x - old_difference_x < 5
                    and -5 < difference_y - old_difference_y < 5):
                count += 1
                count2 = 0

            else:
                print("difference_x=", difference_x)
                print("old_difference_x=", old_difference_x)
                count = 0
                count2 -= 1
                old_difference_x = difference_x
                old_difference_y = difference_y

                print("count:", count)
                print("count2:", count2)

            if (count2 < -1):
                ball_detected = False
                count = 0
                count2 = 0
                print("count is become 0")
            elif (count > 20):
                count = 0
                ball_detected = True
                old_x = mean_x
                old_y = mean_y
                print("count full")
            if (ball_detected is True):
                print("ball_detected True !!")
            else:
                print("ball_detected False !!")
            c = max(cnts, key=cv2.contourArea)
            (x, y), radius = cv2.minEnclosingCircle(c)
            M = cv2.moments(c)
            print("radius: ", radius)
            if (M["m00"] != 0 or M["m00"] != 0):
                center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            else:
                center = (0, 0)
            # Select contours with a size bigger than 0.1
            if radius > 0.2 and ball_detected is True:
                # draw the circle and center
                cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255),
                           2)
                cv2.circle(frame, center, 5, (0, 0, 255), -1)

                img = Image()
                #height, width = frame.shape[:2]
                img = bridge.cv2_to_imgmsg(frame, "bgr8")
                imagePublisher.publish(img)

                #Hold Coordinates
                coordinates_x.append(center[0])
                coordinates_y.append(center[1])

                #Free Coordinates if timer is up
                if set == 0:
                    coordinates_x = []
                    coordinates_y = []
                    xCoordinate = 0
                    yCoordinate = 0

                frameHeight = frame.shape[0]
                frameWidth = frame.shape[1]
                coordinatePublisher.publish(
                    str(old_x) + "," + str(old_y) + "," + str(frameWidth) +
                    "," + str(frameHeight) + "," + str(radius))

        else:
            coordinatePublisher.publish("-")

        cv2.imshow("Frame", frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            stopFlag.set()
            break

    stopFlag.set()
    camera.release()
    cv2.destroyAllWindows()
Example #33
package_directory = subprocess.check_output(["rospack", "find",
                                             "drone_cam"]).decode().strip('\n')
filename = package_directory + '/src/office_drone_c.mp4'
cap = cv2.VideoCapture(filename)
frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
frames = 0
bridge = CvBridge()

topic = '/drone_c/camera/image'
pub = rospy.Publisher(topic, drone_feed, queue_size=10)
pubimg = rospy.Publisher(topic + '_Image', Image, queue_size=10)
rospy.init_node('drone3', anonymous=True)
rate = rospy.Rate(30)
img = drone_feed()
imgmsg = Image()


def pubcam():
    global frames, cap, frame_count
    while not rospy.is_shutdown():
        try:
            frames += 1
            if frames == frame_count:
                cap = cv2.VideoCapture(filename)
                frames = 0
            ret, frame = cap.read()
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            lower_red = np.array([30, 150, 50])
            upper_red = np.array([255, 255, 180])
            mask = cv2.inRange(hsv, lower_red, upper_red)
Example #34
    def __init__(self, config):
        rospy.init_node("gqcnn_base_grasp", anonymous=True)

        # Moveit! setup
        moveit_commander.roscpp_initialize(sys.argv)
        self.robot = moveit_commander.RobotCommander()
        self.scene = moveit_commander.PlanningSceneInterface()
        self.arm = moveit_commander.MoveGroupCommander('arm')
        self.gripper = moveit_commander.MoveGroupCommander('gripper')
        self.arm.set_start_state_to_current_state()
        self.arm.set_pose_reference_frame('base')
        self.arm.set_planner_id('SBLkConfigDefault')
        self.arm.set_planning_time(10)
        self.arm.set_max_velocity_scaling_factor(0.04)
        self.arm.set_max_acceleration_scaling_factor(0.04)
        self.arm.set_goal_orientation_tolerance(0.1)
        self.arm.set_workspace([-2, -2, -2, 2, 2, 2])
        self.gripper.set_goal_joint_tolerance(0.2)
        rospy.loginfo(self.arm.get_pose_reference_frame())  #base
        rospy.loginfo(self.arm.get_planning_frame())  #world

        # message filter for the image topics; needs careful setup: /camera/depth_registered/sw_registered/image_rect, /camera/rgb/image_rect_color
        rospy.loginfo('wait_for_message')  ##############
        rospy.wait_for_message("/camera/rgb/image_raw", Image)  ###########
        rospy.wait_for_message("/camera/depth/image", Image)  ###########
        rospy.loginfo('start_callback')  ###########
        self.image_sub = message_filters.Subscriber("/camera/rgb/image_raw",
                                                    Image)
        self.depth_sub = message_filters.Subscriber("/camera/depth/image",
                                                    Image)
        self.camera_info_topic = "/camera/rgb/camera_info"
        self.camera_info = rospy.wait_for_message(self.camera_info_topic,
                                                  CameraInfo)
        rospy.loginfo(self.camera_info.header.frame_id)  ######

        self.ts = message_filters.ApproximateTimeSynchronizer(
            [self.image_sub, self.depth_sub], 1, 1)
        self.ts.registerCallback(self.cb)
        self.color_msg = Image()
        self.depth_msg = Image()
        self.mask = Image()

        # bounding box for objects
        self.bounding_box = BoundingBox()
        self.bounding_box.maxX = 420
        self.bounding_box.maxY = 250
        self.bounding_box.minX = 90
        self.bounding_box.minY = 40
        self.bridge = CvBridge()

        # transform listener
        self.listener = tf.TransformListener()

        # compute_ik service
        self.ik_srv = rospy.ServiceProxy('/compute_ik', GetPositionIK)
        rospy.loginfo("Waiting for /compute_ik service...")
        self.ik_srv.wait_for_service()
        rospy.loginfo("Connected!")
        self.service_request = PositionIKRequest()
        self.service_request.group_name = 'arm'
        self.service_request.timeout = rospy.Duration(2)
        self.service_request.avoid_collisions = True

        # signal
        self.start = 0
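Example #35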
## 2- Select with the mouse a rectangle which only contains the shapes (the smaller the rectangle the better)
## 3- Press 'a' to detect and count the shapes
## Done for now but may need calibration when we try it (easy)
#import imutils
import rospy
import math
import numpy as np
import cv2
import time
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from std_msgs.msg import String
from GUI.msg import Num
import os

uvc_img = Image()
bridge = CvBridge()
pub = rospy.Publisher('species_No', Num, queue_size=10)

#variables
c,C,T,L,S,w,h=0,0,0,0,0,0,0

def talker():
    msg = Num()
    msg.circlesNo = C
    msg.trianglesNo = T
    msg.linesNo = L
    msg.squaresNo = S
    pub.publish(msg)

def color_seg(choice):
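Example #36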
def main():
    #Coordinates
    global coordinates_x
    global coordinates_y
    coordinates_x = [0]
    coordinates_y = [0]
    global xCoordinate
    global yCoordinate
    global resize
    global resize_width, resize_height
    global mean_x
    global mean_y

    #Start Timer
    stopFlag = Event()
    thread = LowPassFilter(stopFlag)
    thread.start()

    #Video Path
    #video_path = '' #Enter any video path relative to the script file

    #Threshold for Green in HSV Color Space
    greenLower = (23, 67, 48)  # less accurate -> (29,86,6); used: (29,50,150); medium grass: (15,0,156)
    greenUpper = (88, 255, 255)  # used: (64,255,255); medium grass: (88,255,255)

    #Detection in real time
    camera = cv2.VideoCapture(0)

    bridge = CvBridge()
    kernel = np.ones((5, 5), np.uint8)

    #camera.set(3, 920)
    #camera.set(4, 1080)

    #Detection over video
    #camera = cv2.VideoCapture("video path")

    while not rospy.is_shutdown():  # for detection over a video, use 'camera.isOpened()' as the condition instead

        #Read Frame
        ret, frame = camera.read()
        height, width = frame.shape[:2]  # frame = frame[0:height, 0:int(width*0.5)]

        # Resize and Add Noise
        if resize == True:
            frame = imutils.resize(frame,
                                   width=resize_width,
                                   height=resize_height)

        # blurring the frame that's captured
        frame_gau_blur = cv2.GaussianBlur(frame, (3, 3), 1)

        hsv = cv2.cvtColor(frame_gau_blur, cv2.COLOR_BGR2HSV)

        green_range = cv2.inRange(hsv, greenLower, greenUpper)
        res_green = cv2.bitwise_and(frame_gau_blur,
                                    frame_gau_blur,
                                    mask=green_range)
        kernel = np.ones((5, 5), np.uint8)

        # Masking
        mask = cv2.erode(res_green, kernel, iterations=2)
        mask = cv2.dilate(mask, kernel, iterations=3)
        #mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)

        blue_s_gray = cv2.cvtColor(res_green, cv2.COLOR_BGR2GRAY)
        blue_s_gray = cv2.morphologyEx(blue_s_gray, cv2.MORPH_OPEN, kernel)
        #blue_s_gray = cv2.morphologyEx(blue_s_gray, cv2.MORPH_CLOSE, kernel)
        canny_edge = cv2.Canny(blue_s_gray, 200, 210)  #100,110
        canny_edge = cv2.GaussianBlur(canny_edge, (5, 5), 0)
        #canny_edge = cv2.adaptiveThreshold(canny_edge, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 1)
        # applying HoughCircles
        rows = blue_s_gray.shape[0]
        circles = cv2.HoughCircles(canny_edge,
                                   cv2.HOUGH_GRADIENT,
                                   dp=2,
                                   minDist=9,
                                   param1=10,
                                   param2=20,
                                   minRadius=0,
                                   maxRadius=0)
        cnts = cv2.findContours(blue_s_gray.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]  #blue_s_gray.copy
        #cnts = imutils.grab_contours(cnts)
        center = None

        #Execute only at least one contour found
        if len(cnts) > 0 and circles is not None:
            c = max(cnts, key=cv2.contourArea)
            (x, y), radius = cv2.minEnclosingCircle(c)
            M = cv2.moments(c)
            if M["m00"] != 0:
                center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            else:
                center = (0, 0)
            # Select contours with a size bigger than 0.1
            if radius > 0.1:
                # draw the circle and center
                cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255),
                           2)
                cv2.circle(frame, center, 5, (0, 0, 255), -1)
                #print(radius)

                img = Image()
                #height, width = frame.shape[:2]
                img = bridge.cv2_to_imgmsg(frame, "bgr8")
                imagePublisher.publish(img)

                #Hold Coordinates
                coordinates_x.append(center[0])
                coordinates_y.append(center[1])

                #Free Coordinates if timer is up
                if set == 0:
                    coordinates_x = []
                    coordinates_y = []
                    xCoordinate = 0
                    yCoordinate = 0

                frameHeight = frame.shape[0]
                frameWidth = frame.shape[1]
                coordinatePublisher.publish(
                    str(mean_x) + "," + str(mean_y) + "," + str(frameWidth) +
                    "," + str(frameHeight) + "," + str(radius))

        else:
            coordinatePublisher.publish("-")
            print("-")

        cv2.imshow("Frame", frame)
        cv2.imshow("canny", canny_edge)
        cv2.imshow("blue_s_gray", blue_s_gray)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            stopFlag.set()
            break

    stopFlag.set()
    camera.release()
    cv2.destroyAllWindows()
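Example #37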
    def image_callback(self, data):
        objArray = Detection2DArray()
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
        image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)

        # image_hsv = cv2.cvtColor(cv_image,cv2.COLOR_BGR2HSV)

        # the array based representation of the image will be used later in order to prepare the
        # result image with boxes and labels on it.
        image_np = np.asarray(image)
        # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(image_np, axis=0)
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        detection_boxes = detection_graph.get_tensor_by_name(
            'detection_boxes:0')
        # Each score represent how level of confidence for each of the objects.
        # Score is shown on the result image, together with the class label.
        detection_scores = detection_graph.get_tensor_by_name(
            'detection_scores:0')
        detection_classes = detection_graph.get_tensor_by_name(
            'detection_classes:0')
        # Number of objects detected
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')

        (boxes, scores, classes, num) = self.sess.run(
            [detection_boxes, detection_scores, detection_classes,
             num_detections],
            feed_dict={image_tensor: image_np_expanded})

        objects = vis_util.visualize_boxes_and_labels_on_image_array(
            image,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            category_index,
            max_boxes_to_draw=MAX_NUMBER_OF_BOXES,
            min_score_thresh=self.MINIMUM_CONFIDENCE,
            use_normalized_coordinates=True,
            line_thickness=3)

        objArray.detections = []
        objArray.header = data.header

        # Object search
        if len(objects) > 0:
            for i in range(len(objects)):
                objArray.detections.append(
                    self.object_predict(objects[i], data.header, image_np,
                                        cv_image))

        self.object_pub.publish(objArray)

        # image_np is RGB; the channel swap below yields BGR to match the
        # "bgr8" encoding used when publishing
        img = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
        image_out = Image()

        #-- Print 'X' in the center of the camera
        image_height, image_width, channels = img.shape
        cv2.putText(img, "X", (image_width // 2, image_height // 2), font, 1,
                    (0, 0, 255), 2, cv2.LINE_AA)

        try:
            image_out = self.bridge.cv2_to_imgmsg(img, "bgr8")
        except CvBridgeError as e:
            print(e)

        image_out.header = data.header
        self.image_pub.publish(image_out)
Beispiel #38
0
obs_factor_y = 1.0
obs_factor_z = 1.0
obs_factor_yaw = 1.0

obs_offset_x = 0.0
obs_offset_y = -0.40
obs_offset_z = -0.36
obs_offset_yaw = 0 * (3.14 / 180.0)

# current state of quad
x = y = z = vx = vy = vz = roll = pitch = yaw = 0.0

pose_rel = Odometry()

img = np.zeros((480, 640, 3), np.uint8)
raw_image = Image()
bin_image = Image()
contour_image = Image()
corner_image = Image()
pose_image = Image()
debug_image = Image()

bridge = CvBridge()

pose_gate_in = Odometry()


def remove_distortion(img):
    width = img.shape[1]
    height = img.shape[0]
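The function body is truncated here. A minimal sketch of how such a helper typically continues, with hypothetical calibration values `K` and `dist` (not from the original source; real code would load them from a calibration file or a CameraInfo topic):

import cv2
import numpy as np

# Hypothetical intrinsics and distortion coefficients (placeholders).
K = np.array([[525.0, 0.0, 319.5],
              [0.0, 525.0, 239.5],
              [0.0, 0.0, 1.0]])
dist = np.array([0.1, -0.25, 0.0, 0.0, 0.0])

def remove_distortion(img):
    width = img.shape[1]
    height = img.shape[0]
    # Compute a rectified camera matrix, then undistort the frame.
    new_K, _ = cv2.getOptimalNewCameraMatrix(K, dist, (width, height), 1)
    return cv2.undistort(img, K, dist, None, new_K)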
Beispiel #39
0
    def step(self):
        stamp = super().step()
        if not stamp:
            return
        # Publish camera data
        if self._image_publisher.get_subscription_count(
        ) > 0 or self._always_publish:
            self._wb_device.enable(self._timestep)
            image = self._wb_device.getImage()

            if image is None:
                return

            # Image data
            msg = Image()
            msg.header.stamp = stamp
            msg.header.frame_id = self._frame_id
            msg.height = self._wb_device.getHeight()
            msg.width = self._wb_device.getWidth()
            msg.is_bigendian = False
            msg.step = self._wb_device.getWidth() * 4
            # We assign to `_data` directly to avoid the `data` setter.
            # Otherwise ROS 2 converts the data to `array.array`, which slows
            # down the simulation because it copies the memory internally.
            # Both `bytearray` and `array.array` implement the Python buffer
            # protocol, so we should not see unpredictable behavior.
            # deepcode ignore W0212: Avoid conversion from `bytearray` to `array.array`.
            msg._data = image
            msg.encoding = 'bgra8'
            self._image_publisher.publish(msg)

            self.__message_info.header.stamp = Time(
                seconds=self._node.robot.getTime()).to_msg()
            self._camera_info_publisher.publish(self.__message_info)

            if self._wb_device.hasRecognition(
            ) and (self._recognition_publisher.get_subscription_count() > 0 or
                   self._recognition_webots_publisher.get_subscription_count()
                   > 0):
                self._wb_device.recognitionEnable(self._timestep)
                objects = self._wb_device.getRecognitionObjects()

                if objects is None:
                    return

                # Recognition data
                reco_msg = Detection2DArray()
                reco_msg_webots = WbCameraRecognitionObjects()
                reco_msg.header.stamp = stamp
                reco_msg_webots.header.stamp = stamp
                reco_msg.header.frame_id = self._frame_id
                reco_msg_webots.header.frame_id = self._frame_id
                for obj in objects:
                    # Getting Object Info
                    position = Point()
                    orientation = Quaternion()
                    position.x = obj.get_position()[0]
                    position.y = obj.get_position()[1]
                    position.z = obj.get_position()[2]
                    axangle = obj.get_orientation()
                    quat = axangle2quat(axangle[:-1], axangle[-1])
                    orientation.w = quat[0]
                    orientation.x = quat[1]
                    orientation.y = quat[2]
                    orientation.z = quat[3]
                    obj_model = obj.get_model().decode('UTF-8')
                    obj_center = [
                        float(i) for i in obj.get_position_on_image()
                    ]
                    obj_size = [float(i) for i in obj.get_size_on_image()]
                    obj_id = obj.get_id()
                    obj_colors = obj.get_colors()
                    # Object Info -> Detection2D
                    reco_obj = Detection2D()
                    hyp = ObjectHypothesisWithPose()
                    hyp.id = obj_model
                    hyp.pose.pose.position = position
                    hyp.pose.pose.orientation = orientation
                    reco_obj.results.append(hyp)
                    reco_obj.bbox.center.x = obj_center[0]
                    reco_obj.bbox.center.y = obj_center[1]
                    reco_obj.bbox.size_x = obj_size[0]
                    reco_obj.bbox.size_y = obj_size[1]
                    reco_msg.detections.append(reco_obj)

                    # Object Info -> WbCameraRecognitionObject
                    reco_webots_obj = WbCameraRecognitionObject()
                    reco_webots_obj.id = obj_id
                    reco_webots_obj.model = obj_model
                    reco_webots_obj.pose.pose.position = position
                    reco_webots_obj.pose.pose.orientation = orientation
                    reco_webots_obj.bbox.center.x = obj_center[0]
                    reco_webots_obj.bbox.center.y = obj_center[1]
                    reco_webots_obj.bbox.size_x = obj_size[0]
                    reco_webots_obj.bbox.size_y = obj_size[1]
                    for i in range(0, obj.get_number_of_colors()):
                        color = ColorRGBA()
                        color.r = obj_colors[3 * i]
                        color.g = obj_colors[3 * i + 1]
                        color.b = obj_colors[3 * i + 2]
                        reco_webots_obj.colors.append(color)
                    reco_msg_webots.objects.append(reco_webots_obj)
                self._recognition_webots_publisher.publish(reco_msg_webots)
                self._recognition_publisher.publish(reco_msg)
            else:
                self._wb_device.recognitionDisable()
        else:
            self._wb_device.disable()
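Note the quaternion ordering in the recognition loop above: `transforms3d.quaternions.axangle2quat` returns quaternions in `(w, x, y, z)` order, which is why `quat[0]` maps to `orientation.w`. A two-line check:

from transforms3d.quaternions import axangle2quat

q = axangle2quat([0, 0, 1], 1.5708)  # ~90 degrees about z
# q[0] is w (~0.707) and q[3] is z (~0.707)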
Beispiel #40
0
 def __init__(self):
     #rospy.init_node('camera_sub_node', anonymous=True)
     self.bridge = CvBridge()
     self.img_msg = Image()
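The constructor above is only a stub. A minimal sketch of how such a subscriber class is usually completed; the topic name is a placeholder, not from the original source:

import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image

class CameraSub:
    def __init__(self):
        self.bridge = CvBridge()
        self.img_msg = Image()
        # Hypothetical topic; match it to the camera driver in use.
        self.sub = rospy.Subscriber('/camera/image_raw', Image, self.callback)

    def callback(self, msg):
        self.img_msg = msg
        # Convert to a numpy array for OpenCV processing.
        cv_img = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')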
Beispiel #41
0
 def publish_image(self, frame):
     img_msg = Image()
     img_msg.data = self.camera.frame
     self.pubs[self.image_topic] = img_msg
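As written, the message above carries raw pixel data but none of the geometry or encoding fields a subscriber needs, and it is stored in a dict rather than published. A fuller sketch, assuming `frame` is a BGR numpy array and `self.pubs` maps topic names to `rospy.Publisher` objects (both assumptions, not confirmed by the source):

def publish_image(self, frame):
    img_msg = Image()
    img_msg.header.stamp = rospy.Time.now()
    img_msg.height, img_msg.width = frame.shape[:2]
    img_msg.encoding = 'bgr8'
    img_msg.step = img_msg.width * 3  # bytes per row for 3-channel 8-bit
    img_msg.data = frame.tobytes()
    self.pubs[self.image_topic].publish(img_msg)  # assumes a Publisher here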
Beispiel #42
0
turner = Twist()
turner.linear.x = 0
turner.angular.z = 0
lastTime = 0  # time t
lastError = 0  # error at time t
changeError = 0  # derivative
totalError = 0  # integral
p = 0.009  # proportion constant
i = 0  # integral constant
d = 0  # derivative constant
dist = 0

moveStatus = False

depthData = Image()
isDepthReady = False


def depthCallback(data):
    global depthData, isDepthReady
    depthData = data
    isDepthReady = True


def blobCallback(data):
    global lastTime, changeError, totalError, lastError, moveStatus, veloError, dist
    moveStatus = True
    xPos = data.data
    error = 320 - xPos
    newTime = rospy.Time.now().to_sec()
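The callback is truncated after computing `error` and `newTime`. A minimal sketch of how the PID terms declared above are typically combined, continuing with the same variable names (this continuation is an illustration, not the original code):

    dt = newTime - lastTime
    if dt > 0:
        changeError = (error - lastError) / dt  # derivative term
        totalError += error * dt                # integral term
    # Steer proportionally to the pixel error from the image center.
    turner.angular.z = p * error + i * totalError + d * changeError
    lastError = error
    lastTime = newTime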
Beispiel #43
0
def gate_finder(image):

    try:
        img = bridge.imgmsg_to_cv2(image, "bgr8")
    except CvBridgeError as e:
        print(e)

    # Convert to HSV and take the saturation channel
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)

    sides = side_sat(s)

    # bounding_boxes holds 4-point boxes (likely cv2.boxPoints output of a
    # rotated rect: center (x, y), (width, height), angle); point order:
    #   [0] - bottom left
    #   [1] - top right
    #   [2] - top left
    #   [3] - bottom right
    bounding_boxes = find_sides(sides)

    if len(bounding_boxes) > 1:

        # clamp negative coordinates to zero
        for i in range(len(bounding_boxes)):
            for j in range(len(bounding_boxes[i])):
                for k in range(len(bounding_boxes[i][j])):
                    if bounding_boxes[i][j][k] < 0:
                        bounding_boxes[i][j][k] = 0

        #subtract bottom left from top left for each bounding box:
        diff1 = abs(bounding_boxes[0][2] - bounding_boxes[0][0])
        diff2 = abs(bounding_boxes[1][2] - bounding_boxes[1][0])

        # A box whose x-extent exceeds its y-extent is a horizontal
        # rectangle; drop it. Pop the higher index first so the first
        # removal does not shift the second.
        if diff2[0] > diff2[1]:
            bounding_boxes.pop(1)
        if diff1[0] > diff1[1]:
            bounding_boxes.pop(0)

        # find the halfway point between the two midpoints

        if len(bounding_boxes) > 1:

            #find midpts (x)
            #average top, bottom midpoints
            top_mid = (bounding_boxes[0][1][0] + bounding_boxes[0][2][0]) / 2
            bot_mid = (bounding_boxes[0][0][0] + bounding_boxes[0][3][0]) / 2
            mid0 = (top_mid + bot_mid) / 2

            top_mid = (bounding_boxes[1][1][0] + bounding_boxes[1][2][0]) / 2
            bot_mid = (bounding_boxes[1][0][0] + bounding_boxes[1][3][0]) / 2
            mid1 = (top_mid + bot_mid) / 2

            image_mid = (mid0 + mid1) / 2
            mid0 = int(mid0)
            mid1 = int(mid1)
            image_mid = int(image_mid)

            output = img.copy()
            #Draw on image:
            if bounding_boxes is not None:
                for box in bounding_boxes:
                    cv2.drawContours(output, [box], 0, (0, 0, 255), 2)

            cv2.line(output, (mid1, 0), (mid1, 800), (0, 255, 0))
            cv2.line(output, (mid0, 0), (mid0, 800), (0, 255, 0))
            cv2.line(output, (image_mid, 0), (image_mid, 800), (0, 255, 0))

            print("boutta show")
            #cv2.imshow("Output", output)

            #cv2.waitKey(0)

            mid_box = [[image_mid + 10, bounding_boxes[0][0][1]],
                       [image_mid - 10, bounding_boxes[0][1][1]],
                       [image_mid + 10, bounding_boxes[0][2][1]],
                       [image_mid - 10, bounding_boxes[0][3][1]]]
            boxes = [bounding_boxes[1], mid_box]

            # `boxes` only exists when a second box was found, so the call
            # belongs inside the guard above
            extractmath(boxes)
Beispiel #44
0
    def cnn(self):
        if self.cv_img_crop == []:
            print "No crop image received"
            return
        print "start prediction"
        img_pred = self.cv_image
        for i, im in enumerate(self.cv_img_crop):
            start = time.time()
            if im is None:
                break
            im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
            im = im.reshape(im.shape[0], im.shape[1], 1)

            transformer = caffe.io.Transformer(
                {'data': self.net_full_conv.blobs['data'].data.shape})
            transformer.set_transpose('data', (2, 0, 1))
            #transformer.set_raw_scale('data', 255.0)
            self.net_full_conv.blobs['data'].reshape(1, 1, 32, 100)
            transformed_image = transformer.preprocess('data', im)
            transformed_image -= np.mean(transformed_image)

            #make classification map by forward and print prediction indices at each location
            self.net_full_conv.blobs['data'].data[...] = transformed_image
            #self.net_full_conv.blobs['data'].data[...] = im
            #out = self.net_full_conv.forward(data=np.asarray(transformed_image))
            out = self.net_full_conv.forward()
            print "Prediction time:", time.time() - start

            top1 = out['prob'][0].argmax()
            x = self.cv_crop_region[i][0]
            y = self.cv_crop_region[i][1]
            w = self.cv_crop_region[i][2]
            h = self.cv_crop_region[i][3]

            if out['prob'][0][top1] >= 0.9:
                print 'class: ', top1
                print out['prob'][0][top1][0][0]

                cv2.rectangle(img_pred, (x, y), (x + w, y + h), (0, 0, 255), 3)
                print self.navi_map[top1]
                cv2.putText(img_pred, str(self.navi_map[top1][0]), (x, y),
                            cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 0), 2)

                if top1 == 4 or top1 == 2:
                    turn_right = rospy.ServiceProxy(
                        '/pbody/open_loop_intersection_control_node/turn_right',
                        Empty)
                    turn = turn_right()
                    topomap_action = rospy.ServiceProxy(
                        'topo_map_action', actions)
                    action = actions()
                    action.action = "R"
                    resp = topomap_action(action)
                    print "target node: ", resp.target_state
                elif top1 == 23:
                    turn_left = rospy.ServiceProxy(
                        '/pbody/open_loop_intersection_control_node/turn_left',
                        Empty)
                    turn = turn_left()
                    topomap_action = rospy.ServiceProxy(
                        'topo_map_action', actions)
                    action = actions()
                    action.action = "L"
                    resp = topomap_action(action)
                    print "target node: ", resp.target_state
                elif top1 == 14 or top1 == 22:
                    turn_forward = rospy.ServiceProxy(
                        '/pbody/open_loop_intersection_control_node/turn_forward',
                        Empty)
                    turn = turn_forward()
                    topomap_action = rospy.ServiceProxy(
                        'topo_map_action', actions)
                    action = actions()
                    action.action = "F"
                    resp = topomap_action(action)
                    print "target node: ", resp.target_state
                self.stop_line = 0
                print "stop text spotting"
                break

        #publish image_with_box
        image_all_pred_image = self.bridge.cv2_to_imgmsg(img_pred, "bgr8")
        image_all_pred_image.header.stamp = rospy.Time.now()
        self.image_pred_pub.publish(image_all_pred_image)
        self.cv_img_crop = []
        self.cv_crop_region = []
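The three service branches above differ only in the service name and the action letter. A compact sketch of the same dispatch with a lookup table (same services and behavior assumed):

TURNS = {2: ('turn_right', 'R'), 4: ('turn_right', 'R'),
         23: ('turn_left', 'L'),
         14: ('turn_forward', 'F'), 22: ('turn_forward', 'F')}

if top1 in TURNS:
    srv_name, letter = TURNS[top1]
    rospy.ServiceProxy(
        '/pbody/open_loop_intersection_control_node/' + srv_name, Empty)()
    topomap_action = rospy.ServiceProxy('topo_map_action', actions)
    action = actions()
    action.action = letter
    resp = topomap_action(action)
    print "target node: ", resp.target_state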
Beispiel #45
0
    #escape key stops
    # while(key < 27):
    #     # H[2][2]=Z
    #     bird_image=cv2.warpPerspective(image, H, (1000,1000))
    #     plt.subplot(121),plt.imshow(image),plt.title('image')
    #     plt.subplot(122),plt.imshow(bird_image),plt.title('Perspective')
    #     plt.show()
        # key=cv2.waitKey(0)
        # if(key == 24):
        #     Z+=0.5
        #     print(key)
        # if(key == 25):
        #     Z-=0.5
        #     print(key)

    # Note: this hand-built message is never published; cv2_to_imgmsg below
    # fills all of these fields itself.
    msg = Image()
    msg.header.stamp = rospy.Time.now()
    msg.height = 1000
    msg.width = 1000
    msg.step = 1000
    bridge = CvBridge()

    while True:
        try:
            bird_video=cv2.warpPerspective(_image, H, (1000,1000))
            cv2.imshow("Bird view video", bird_video)
            publisher.publish(bridge.cv2_to_imgmsg(bird_video, "bgr8"))
            cv2.waitKey(100)
        except KeyboardInterrupt:
            break
Beispiel #46
0
class predictor:
    """
        
    """
    def apply(self, y0, index):
        """
            Apply diffeomorphism number <index> to image <y0> 
        """

    def compare(self, y0, yn, size):
        """
            Compare reduced size of images. 
            Returns norm of difference in certain areas, 
            which is defined by the variance is less than 
            the average variance value. 
        """

    def find_neighbor_indices(self, y0):
        #pdb.set_trace()
        self.estimator = learner((y0.shape[1], y0.shape[0]),
                                 [4, 4]).new_estimator()
        self.estimator.init_structures(y0[:, :, 0])
        #self.neighbor_indices = estimator.neighbor_indices

    def compare_neighbor(self, y0, yn, var=None):
        """
            Compare full-size images over the neighbor area.
        """

        estr = learner((y0.shape[1], y0.shape[0]), [4, 4]).new_estimator()
        estr.update(y0[:, :, 0], yn[:, :, 0])
        estr.update(y0[:, :, 1], yn[:, :, 1])
        estr.update(y0[:, :, 2], yn[:, :, 2])
        n = len(estr.neighbor_similarity_flat)
        best_sim = np.array([[0]] * n)
        for k in range(n):
            best_sim[k] = max(estr.neighbor_similarity_flat[k])
        if var is not None:
            best_sim = best_sim * var.flat
        return np.mean(best_sim)

    def apply_random_sequence(self, y0, level=5):
        """
            Apply diffeomorphisms on image <y0> in a 
            randomized sequence of depth <level>. 
            Returns the command sequence as a <level>-tuple 
            and the final image.
        """

    def search_for_plan(self, y0, yn, nitr=10, level=5):
        """
            Search for a sequence of diffeomorphisms that 
            transforms <y0> to <yn>.
            Max <nitr> random sequences is tried, max length 
            of sequences is <level>
        """
        size = [16, 12]
        for i in range(nitr):
            # get an estimated final image and the sequence that produced it
            y_est, seq = self.apply_random_sequence(y0, level)

            # compare the estimated image against the goal
            sim = self.compare(y_est, yn, size)

    def test_compare_neighbor(self, argv):
        def get_Y_pair((x0, y0), (dx, dy), (w, h), im):
            imc = im.crop((x0, y0, x0 + w, y0 + h))
            Y0 = np.array(imc.getdata(), np.uint8).reshape((h, w, 3))

            imc2 = im.crop((x0 + dx, y0 + dy, x0 + dx + w, y0 + dy + h))
            Y1 = np.array(imc2.getdata(), np.uint8).reshape((h, w, 3))
            return (Y0, Y1)

        # Find Input image
        try:
            infile = argv[argv.index('-im') + 1]
        except ValueError:
            infile = '/home/adam/git/surf12adam/diffeo_experiments/lighttower640.jpg'
        print 'Using image file:                ', infile

        im = Image.open(infile)
        print 'Compare two images close to each other'
        y0, y1 = get_Y_pair((260, 300), (2, 0), (160, 120), im)
        self.find_neighbor_indices(y0)
        print self.compare_neighbor(y0, y1)

        print 'Compare two images far from each other'
        y0, y1 = get_Y_pair((260, 300), (5, 0), (160, 120), im)
        print self.compare_neighbor(y0, y1)

        print 'Compare two images really far from each other'
        y0, y1 = get_Y_pair((260, 300), (200, 0), (160, 120), im)
        print self.compare_neighbor(y0, y1)

        print 'Compare two images close to each other with noise'
        y0, y1 = get_Y_pair((260, 300), (0, 0), (160, 120), im)
        #pdb.set_trace()
        y1 = y1 + np.random.randint(0, 10, y0.shape)
        y1 = (y1 * 255.0 / np.max(y1)).astype(np.uint8)
        #        pdb.set_trace()
        print self.compare_neighbor(y0, y1)
from transforms3d import quaternions
import roslib
import sys
import rospy
from sensor_msgs.msg import Image, Imu
import time
import math
# Note: this PIL import shadows sensor_msgs.msg.Image above; only the
# PIL Image is used below.
from PIL import Image
import matplotlib.pyplot as plt
from matplotlib import colors

resolution = 0.05  # meters/pixel

np.set_printoptions(threshold=4)  # summarize large arrays when printing

img = Image.open("map.pgm")
area = (950, 950, 1600, 1130) #left, top, right, bottom
cropped_img = img.crop(area)
img_matrix = np.array(cropped_img)


# unknown cells of the map
BW_img_des = img_matrix == 205
BW_img_des = BW_img_des * -1
# occupied cells of the map
BW_img_oc = img_matrix == 0
BW_img_oc = BW_img_oc * 1  # 0 and 1 instead of False and True

map = BW_img_des + BW_img_oc
length_map = map.shape[1]  # number of columns
width_map = map.shape[0]  # number of rows
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import sys
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import cv2
from math import isnan
import numpy as np

imageData = Image()


def imageCallback(data):
    global imageData
    imageData = data


def listener():
    rospy.init_node('image_listener', anonymous=True)
    rospy.Subscriber("/camera/rgb/image_color", Image,
                     imageCallback)

    rospy.sleep(0.5)

    global imageData

    bridge = CvBridge()
    cvImage = bridge.imgmsg_to_cv2(imageData, desired_encoding="passthrough")
    cvImage = cv2.cvtColor(cvImage, cv2.COLOR_BGR2RGB)
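The fixed `rospy.sleep(0.5)` above is a race: if no frame arrives within half a second, `imageData` is still the empty `Image()` and the conversion fails. A sketch of the same listener using `rospy.wait_for_message`, which blocks until one message arrives:

def listener():
    rospy.init_node('image_listener', anonymous=True)
    # Blocks until a frame is received (or raises after the timeout).
    imageData = rospy.wait_for_message("/camera/rgb/image_color", Image,
                                       timeout=5.0)
    bridge = CvBridge()
    cvImage = bridge.imgmsg_to_cv2(imageData, desired_encoding="passthrough")
    cvImage = cv2.cvtColor(cvImage, cv2.COLOR_BGR2RGB)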
Beispiel #49
0
def main(argv):

    if len(sys.argv) < 2:
        print 'Please specify the data directory'
        return 1

    if len(sys.argv) < 3:
        print 'Please specify the output rosbag file'
        return 1
    bag = rosbag.Bag(sys.argv[2], 'w')
    ral = csv.reader(open(sys.argv[1] + "/LogFiles_tmp/RawAccel.csv"))
    rgo = csv.reader(open(sys.argv[1] + "/LogFiles_tmp/RawGyro.csv"))
    gps = csv.reader(open(sys.argv[1] + "/LogFiles_tmp/OnboardGPS.csv"))
    gta = csv.reader(open(sys.argv[1] + "/LogFiles_tmp/GroundTruthAGM.csv"))
    gtl = csv.reader(open(sys.argv[1] + "/LogFiles_tmp/GroundTruthAGL.csv"))
    bap = csv.reader(open(sys.argv[1] + "/LogFiles_tmp/BarometricPressure.csv"))
    onp = csv.reader(open(sys.argv[1] + "/LogFiles_tmp/OnboardPose.csv"))
    ori = csv.reader(open(sys.argv[1] + "/LogFiles_tmp/RawOrien.csv"))
    pos = csv.reader(open(sys.argv[1] + "/LogFiles_tmp/OnboardPose.csv"))


    ral_seq = 0
    bap_seq = 0
    img_seq = 0
    pos_seq = 0
    cal = -1
    gps_seq = 0
    # IMAGE_COUNT = 81169
    STREET_VIEW = 113

    print("packageing Imu for OnboardPose.csv ...")

    for pos_data in pos:
        pos_seq = pos_seq + 1
        utime = int(pos_data[0])
        timestamp = rospy.Time.from_sec(utime/1e6)

        imu = Imu()
        imu.header.seq = pos_seq
        imu.header.stamp = timestamp
        imu.header.frame_id = '/Imu'

        imu.linear_acceleration.x = float(pos_data[4])
        imu.linear_acceleration.y = float(pos_data[5])
        imu.linear_acceleration.z = float(pos_data[6])
        imu.linear_acceleration_covariance = np.zeros(9)

        imu.angular_velocity.x = float(pos_data[1])
        imu.angular_velocity.y = float(pos_data[2])
        imu.angular_velocity.z = float(pos_data[3])
        imu.angular_velocity_covariance = np.zeros(9)

        imu.orientation.w = float(pos_data[14])
        imu.orientation.x = float(pos_data[15])
        imu.orientation.y = float(pos_data[16])
        imu.orientation.z = float(pos_data[17])

        bag.write('/Imu', imu, t=timestamp)

        # The same sample is written a second time with an incremented
        # sequence number.
        pos_seq = pos_seq + 1
        imu.header.seq = pos_seq
        bag.write('/Imu', imu, t=timestamp)



        # if cal < 0:
        #     Caminfo = CameraInfo()
        #     cam_data = np.load(sys.argv[1] + '/calibration_data.npz')
        #     Caminfo.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        #     Caminfo.P = [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0]
        #     Caminfo.D = np.asarray(cam_data['distCoeff']).reshape(-1)
        #     Caminfo.K = np.asarray(cam_data['intrinsic_matrix']).reshape(-1)
        #     Caminfo.binning_x = 1
        #     Caminfo.binning_y = 1
        #     img_cv_h, img_cv_w = img_cv.shape[:2]
        #     Caminfo.width = img_cv_w
        #     Caminfo.height = img_cv_h
        #     Caminfo.distortion_model = 'plumb_bob'
        #     bag.write('/camera/camera_info', Caminfo, t=timestamp)
        #     cal = 0

    print("Package groundtruthAGM...")
    for gta_data in gta:
        Igt = Int8MultiArray()
        Igt.data = [int(gta_data[2]), int(gta_data[3]), int(gta_data[4])]
        bag.write('/groundtruth/sv_id', Igt)

    print("Package groundtruthIMAGE...")
    for stv_seq in range(STREET_VIEW):
        # print(stv_seq)
        img_cv = cv2.imread(sys.argv[1] + "/Street View Images/left-" + '{0:03d}'.format(stv_seq+1) + ".jpg", 1)
        br = CvBridge()
        Img = Image()
        Img = br.cv2_to_imgmsg(img_cv, "bgr8")
        Img.header.seq = stv_seq
        # Img.header.stamp = timestamp
        Img.header.frame_id = 'streetview'
        bag.write('/groundtruth/image', Img)

    for bap_data in bap:
        bap_seq = bap_seq + 1
        bar = Barometer()
        bar.altitude = float(bap_data[2])
        bar.pressure = float(bap_data[1])
        bar.temperature = float(bap_data[3])
        bar.header.seq = int(bap_seq)
        bar.header.frame_id = 'BarometricPressure'
        bag.write('/barometric_pressure', bar)

    print("Packaging GPS and cam_info")
    for gps_data in gps:

        imgid = int(gps_data[1])

        utime = int(gps_data[0])
        timestamp = rospy.Time.from_sec(utime / 1e6)

        header = Header()
        header.seq = imgid
        header.stamp = timestamp
        header.frame_id = 'gps'

        Gps = NavSatFix()
        Gps.header = header
        Gps.status.service = 1
        Gps.latitude = float(gps_data[2])
        Gps.longitude = float(gps_data[3])
        Gps.altitude = float(gps_data[4])
        bag.write('/gps', Gps, t=timestamp)

        if imgid <= MAVIMAGE and imgid % 3 == 0:
            # write aerial image
            img_seq = img_seq+1
            img_cv = cv2.imread(sys.argv[1] + "/MAV Images/" + '{0:05d}'.format(int(imgid)) + ".jpg", 1)
            img_cv = cv2.resize(img_cv, MAVDIM, interpolation=cv2.INTER_AREA)
            cv2.imshow('1', img_cv)
            cv2.waitKey(1)  # let the debug window refresh
            br = CvBridge()
            Img = Image()
            Img = br.cv2_to_imgmsg(img_cv, "bgr8")
            Img.header.seq = int(img_seq)
            print(imgid)
            Img.header.stamp = timestamp
            Img.header.frame_id = 'camera'
            bag.write('/camera/image', Img, t=timestamp)

    bag.close()
    return 0
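A quick sanity-check sketch for the bag written above: read a few messages back and print their topics and timestamps (topic names as used in this example):

import rosbag

with rosbag.Bag(sys.argv[2]) as bag:
    for i, (topic, msg, t) in enumerate(
            bag.read_messages(topics=['/Imu', '/gps', '/camera/image'])):
        print(topic, t.to_sec())
        if i >= 9:
            break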
Beispiel #50
0
            cv2.rectangle(imgcv, (box[0], box[1]), (box[2], box[3]),
                          colors[cls_indx], thick)
            cv2.putText(imgcv, mess, (box[0], box[1] - 7), 0, 1e-3 * h,
                        colors[cls_indx], thick // 3)

    # Note: creating the publisher once at startup is preferable to
    # constructing it on every call.
    pub_bbox = rospy.Publisher("m2det_ros/detetct_result",
                               BoundingBoxes,
                               queue_size=1)

    if show_result:
        cv2.imshow("result", imgcv)
        cv2.waitKey(1)


captureImage = Image()


def imgCallback(msg):
    global captureImage
    captureImage = msg

    try:
        cv_img = CvBridge().imgmsg_to_cv2(captureImage, "bgr8")

        w, h = cv_img.shape[1], cv_img.shape[0]
        img = _preprocess(cv_img).unsqueeze(0)

        if cfg.test_cfg.cuda:
            img = img.cuda()
Beispiel #51
0
#!/usr/bin/env python
import roslib
#roslib.load_manifest('rosopencv')
import sys
import rospy
import cv2
import math
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from copy import deepcopy
import imutils

colorImage = Image()
isColorImageReady = False
greenLower = (29, 86, 6)  # HSV lower bound for green
greenUpper = (64, 255, 255)  # HSV upper bound for green
boundsInit = True
bridge = ""
ratio = 1


def calculateLineDistance(pt1, pt2):
    dist = math.sqrt((pt1[0] - pt2[0])**2 + (pt1[1] - pt2[1])**2)
    return dist


def getTipPoint(pts):
    #print(str(pts))
    #Calculate line distances
    line1 = calculateLineDistance(pts[0][0], pts[1][0])
    def cbRegion(self, region_msg):
        # ***************************************************************
        # Make sure that the device is working
        # ***************************************************************

        t_start = time.clock()
        if self.device_work:

            # ***************************************************************
            # Preprocessing
            # ***************************************************************
            if region_msg.image_region.width == 0:
                return
            img = self.bridge.imgmsg_to_cv2(region_msg.image_region, "bgr8")
            img = cv2.resize(img, self.dim)
            img = img.astype(np.float32)
            img_m = np.zeros((self.dim[0], self.dim[1], 3), np.float32)
            img_m[:] = (104, 116, 122)
            img = cv2.subtract(img, img_m)

            # ***************************************************************
            # Send the image to NCS and get the result
            # ***************************************************************
            self.graph.LoadTensor(img.astype(np.float16), 'user object')
            output, userobj = self.graph.GetResult()
            order = output.argsort()[::-1][:4]

            # ***************************************************************
            # Probability threshold
            # ***************************************************************
            if (output[order[0]] * 100 >= 95):
                #cv2.imwrite("/home/tony/ncs/ncsdk/examples/caffe/CaffeNet/image/" + str(self.img_count) + ".jpg", img_cv)
                print('\n-------region predictions --------')
                x = region_msg.x
                y = region_msg.y
                w = region_msg.width
                h = region_msg.height
                if order[0] == 0:
                    cv2.rectangle(self.img, (x, y), (x + w, y + h),
                                  (255, 0, 0), 3)
                    cv2.putText(self.img, 'blue_circle', (x - 50, y - 5),
                                cv2.FONT_HERSHEY_COMPLEX, 0.3, (255, 0, 0), 1)
                elif order[0] == 1:
                    cv2.rectangle(self.img, (x, y), (x + w, y + h),
                                  (0, 255, 0), 3)
                    cv2.putText(self.img, 'green_triangle', (x - 50, y - 5),
                                cv2.FONT_HERSHEY_COMPLEX, 0.3, (0, 255, 0), 1)
                elif order[0] == 2:
                    cv2.rectangle(self.img, (x, y), (x + w, y + h),
                                  (0, 0, 255), 3)
                    cv2.putText(self.img, 'red_cross', (x - 50, y - 5),
                                cv2.FONT_HERSHEY_COMPLEX, 0.3, (0, 0, 255), 1)
                elif order[0] == 3:
                    cv2.rectangle(self.img, (x, y), (x + w, y + h), (0, 0, 0),
                                  3)
                    cv2.putText(self.img, 'background', (x - 50, y - 5),
                                cv2.FONT_HERSHEY_COMPLEX, 0.3, (0, 0, 0), 1)
                img_msg = Image()
                img_msg = self.bridge.cv2_to_imgmsg(self.img, "bgr8")
                self.pub_image_pred.publish(img_msg)

                for i in range(0, 4):
                    print str(
                        self.pred_count), (' prediction ' + str(i) +
                                           ' (probability ' +
                                           str(output[order[i]] * 100) +
                                           '%) is ' + self.labels[order[i]] +
                                           '  label index is: ' +
                                           str(order[i]))
                self.pred_count += 1
        print "time taken = ", time.clock() - t_start
Beispiel #53
0
    def image_cb(self, data):
        objArray = Detection2DArray()
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)
        image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)

        # the array based representation of the image will be used later in order to prepare the
        # result image with boxes and labels on it.
        image_np = np.asarray(image)
        # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(image_np, axis=0)
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represent how level of confidence for each of the objects.
        # Score is shown on the result image, together with the class label.
        scores = detection_graph.get_tensor_by_name('detection_scores:0')
        classes = detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')

        (boxes, scores, classes, num_detections) = self.sess.run(
            [boxes, scores, classes, num_detections],
            feed_dict={image_tensor: image_np_expanded})

        objects = vis_util.visualize_boxes_and_labels_on_image_array(
            image,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            category_index,
            use_normalized_coordinates=True,
            line_thickness=2)

        objArray.detections = []
        objArray.header = data.header
        object_count = 1

        for i in range(len(objects)):
            object_count += 1
            objArray.detections.append(
                self.object_predict(objects[i], data.header, image_np,
                                    cv_image))

        self.object_pub.publish(objArray)

        # EXTRA: publish a lane-change flag -- 0 when nothing was detected
        # (object_count starts at 1), 1 otherwise
        if object_count == 1:
            self.lane_change_pub.publish(0)
        else:
            self.lane_change_pub.publish(1)

        # EXTRA end

        img = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
        image_out = Image()
        try:
            image_out = self.bridge.cv2_to_imgmsg(img, "bgr8")
        except CvBridgeError as e:
            print(e)
        image_out.header = data.header
        self.image_pub.publish(image_out)
Beispiel #54
0
    def __init__(self,
                 joint_increment=None,
                 sim_time_factor=0.001,
                 running_step=0.001,
                 random_object=False,
                 random_position=False,
                 use_object_type=False,
                 populate_object=False,
                 env_object_type='free_shapes'):
        """
        initializing all the relevant variables and connections
        :param joint_increment: increment of the joints
        :param running_step: gazebo simulation time factor
        :param random_object: spawn random object in the simulation
        :param random_position: change object position in each reset
        :param use_object_type: assign IDs to objects and used them in the observation space
        :param populate_object: to populate object(s) in the simulation using sdf file
        :param env_object_type: object type for environment, free_shapes for boxes while others are related to use_case
            'door_handle', 'combox', ...
        """

        # Assign Parameters
        self._joint_increment = joint_increment
        self.running_step = running_step
        self._random_object = random_object
        self._random_position = random_position
        self._use_object_type = use_object_type
        self._populate_object = populate_object

        # Assign MsgTypes
        self.joints_state = JointState()
        self.contact_1_state = ContactsState()
        self.contact_2_state = ContactsState()
        self.collisions = Bool()
        self.camera_rgb_state = Image()
        self.camera_depth_state = Image()
        self.contact_1_force = Vector3()
        self.contact_2_force = Vector3()
        self.gripper_state = VacuumGripperState()

        self._list_of_observations = [
            "distance_gripper_to_object", "elbow_joint_state",
            "shoulder_lift_joint_state", "shoulder_pan_joint_state",
            "wrist_1_joint_state", "wrist_2_joint_state",
            "wrist_3_joint_state", "contact_1_force", "contact_2_force",
            "object_pos_x", "object_pos_y", "object_pos_z",
            "min_distance_gripper_to_object"
        ]

        if self._use_object_type:
            self._list_of_observations.append("object_type")

        # Establishes connection with simulator
        """
        1) Gazebo Connection 
        2) Controller Connection
        3) Joint Publisher 
        """
        self.gazebo = GazeboConnection(sim_time_factor=sim_time_factor)
        self.controllers_object = ControllersConnection()
        self.pickbot_joint_pubisher_object = JointPub()

        # Define Subscribers as Sensordata
        """
        1) /pickbot/joint_states
        2) /gripper_contactsensor_1_state
        3) /gripper_contactsensor_2_state
        4) /gz_collisions

        not used so far but available in the environment 
        5) /pickbot/gripper/state
        6) /camera_rgb/image_raw   
        7) /camera_depth/depth/image_raw
        """
        rospy.Subscriber("/pickbot/joint_states", JointState,
                         self.joints_state_callback)
        rospy.Subscriber("/gripper_contactsensor_1_state", ContactsState,
                         self.contact_1_callback)
        rospy.Subscriber("/gripper_contactsensor_2_state", ContactsState,
                         self.contact_2_callback)
        rospy.Subscriber("/gz_collisions", Bool, self.collision_callback)
        # rospy.Subscriber("/pickbot/gripper/state", VacuumGripperState, self.gripper_state_callback)
        # rospy.Subscriber("/camera_rgb/image_raw", Image, self.camera_rgb_callback)
        # rospy.Subscriber("/camera_depth/depth/image_raw", Image, self.camera_depth_callback)

        # Define Action and state Space and Reward Range
        """
        Action Space: Box Space with 6 values.
        
        State Space: Box Space with 12 values. It is a numpy array with shape (12,)

        Reward Range: -infinity to infinity 
        """
        # Directly use joint_positions as action
        if self._joint_increment is None:
            low_action = np.array([
                -(math.pi - 0.05), -(math.pi - 0.05), -(math.pi - 0.05),
                -(math.pi - 0.05), -(math.pi - 0.05), -(math.pi - 0.05)
            ])

            high_action = np.array([
                math.pi - 0.05, math.pi - 0.05, math.pi - 0.05, math.pi - 0.05,
                math.pi - 0.05, math.pi - 0.05
            ])
        else:  # Use joint_increments as action
            low_action = np.array([
                -self._joint_increment, -self._joint_increment,
                -self._joint_increment, -self._joint_increment,
                -self._joint_increment, -self._joint_increment
            ])

            high_action = np.array([
                self._joint_increment, self._joint_increment,
                self._joint_increment, self._joint_increment,
                self._joint_increment, self._joint_increment
            ])

        self.action_space = spaces.Box(low_action, high_action)

        high = np.array([
            999,  # distance_gripper_to_object
            math.pi,  # elbow_joint_state
            math.pi,  # shoulder_lift_joint_state
            math.pi,  # shoulder_pan_joint_state
            math.pi,  # wrist_1_joint_state
            math.pi,  # wrist_2_joint_state
            math.pi,  # wrist_3_joint_state
            np.finfo(np.float32).max,  # contact_1_force
            np.finfo(np.float32).max,  # contact_2_force
            1,  # object_pos_x
            1.4,  # object_pos_y
            1.5,  # object_pos_z
            999
        ])  # min_distance_gripper_to_object

        low = np.array([
            0,  # distance_gripper_to_object
            -math.pi,  # elbow_joint_state
            -math.pi,  # shoulder_lift_joint_state
            -math.pi,  # shoulder_pan_joint_state
            -math.pi,  # wrist_1_joint_state
            -math.pi,  # wrist_2_joint_state
            -math.pi,  # wrist_3_joint_state
            0,  # contact_1_force
            0,  # contact_2_force
            -1,  # object_pos_x
            0,  # object_pos_y
            0,  # object_pos_z
            0
        ])  # min distance

        if self._use_object_type:
            high = np.append(high, 9)
            low = np.append(low, 0)

        self.observation_space = spaces.Box(low, high)
        self.reward_range = (-np.inf, np.inf)

        self._seed()
        self.done_reward = 0

        # set up everything to publish the Episode Number and Episode Reward on a rostopic
        self.episode_num = 0
        self.accumulated_episode_reward = 0
        self.episode_steps = 0
        self.reward_pub = rospy.Publisher('/openai/reward',
                                          RLExperimentInfo,
                                          queue_size=1)
        self.reward_list = []
        self.episode_list = []
        self.step_list = []
        self.csv_name = logger.get_dir() + '/result_log'
        print("CSV NAME")
        print(self.csv_name)
        self.csv_success_exp = logger.get_dir(
        ) + "/success_exp" + datetime.datetime.now().strftime(
            '%Y-%m-%d_%Hh%Mmin') + ".csv"
        self.success_2_contact = 0
        self.success_1_contact = 0

        # object name: name of the target object
        # object type: index of the object name in the object list
        # object list: pool of the available objects, have at least one entry
        self.object_name = ''
        self.object_type_str = ''
        self.object_type = 0
        self.object_list = U.get_target_object(env_object_type)
        print("object list {}".format(self.object_list))
        self.object_initial_position = Pose(
            position=Point(x=-0.13, y=0.848, z=1.06),  # x=0.0, y=0.9, z=1.05
            orientation=quaternion_from_euler(0.002567, 0.102, 1.563))
        self.pickbot_initial_position = Pose(position=Point(x=0.0,
                                                            y=0.0,
                                                            z=1.12),
                                             orientation=Quaternion(x=0.0,
                                                                    y=0.0,
                                                                    z=0.0,
                                                                    w=1.0))

        if self._populate_object:
            # populate objects from object list
            self.populate_objects()

        # select first object, set object name and object type
        # if object is random, spawn random object
        # else get the first entry of object_list
        self.set_target_object(random_object=self._random_object,
                               random_position=self._random_position)

        # The distance between gripper and object, when the arm is in initial pose
        self.max_distance, _ = U.get_distance_gripper_to_object()
        # The minimum distance between gripper and object during training
        self.min_distance = 999
Beispiel #55
0
from fast_rcnn.nms_wrapper import nms
from fast_rcnn.test import im_detect  # used by detect() below
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse

import rospy
from ros_faster_rcnn.msg import *
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import threading

RUNNING = False
IMAGE1 = Image()
IMAGE2 = Image()


def detect(image):
    """Detect object classes in an image using pre-computed object proposals."""
    global NET

    # Detect all object classes and regress object bounds
    rospy.loginfo("Starting detection")
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(NET, image)
    timer.toc()
    rospy.loginfo('Detection took %f seconds for %d object proposals',
                  timer.total_time, boxes.shape[0])
Beispiel #56
0
from sensor_msgs.msg import Image

if __name__ == '__main__':
    from options import args
    cap = cv2.VideoCapture(args.input)
    # Camera Settings
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, args.camera_width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, args.camera_height)
    if (args.camera_fps):
        cap.set(cv2.CAP_PROP_FPS, args.camera_fps)

    # Init ROS Node
    image_pub = rospy.Publisher('/camera/image_raw', Image, queue_size=10)
    rospy.init_node('camera', anonymous=True)

    i = 0
    while not rospy.is_shutdown():
        msg = Image()
        ret, frame = cap.read()
        # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        size = frame.size
        rows, cols, c = frame.shape
        msg.height = rows
        msg.width = cols
        msg.encoding = 'bgr8'
        msg.step = size//rows
        msg.data = list(frame.reshape(size))
        msg.header.frame_id = str(i)
        image_pub.publish(msg)
        i += 1
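Three easy improvements to the loop above, sketched below: guard against failed reads, stamp each message, and hand the raw buffer over with `tobytes()` instead of building a Python list element by element (the list conversion dominates the per-frame cost):

while not rospy.is_shutdown():
    ret, frame = cap.read()
    if not ret:
        continue  # skip failed captures instead of crashing
    msg = Image()
    msg.header.stamp = rospy.Time.now()
    msg.height, msg.width = frame.shape[:2]
    msg.encoding = 'bgr8'
    msg.step = msg.width * 3
    msg.data = frame.tobytes()  # much cheaper than list(frame.reshape(size))
    image_pub.publish(msg)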
    def cb_image(self, msg_img):

        self.img = self.cv_bridge.compressed_imgmsg_to_cv2(msg_img, "bgr8")

        if self.totem_list is None:
            return

        # ================================================
        #   Get the transformation matrices
        #   between odom and velodyne, and lidar and camera
        # ================================================
        trans = None
        while trans is None:
            try:
                (trans, qua) = self.tf_listener.lookupTransform(
                    "/velodyne", "/odom", rospy.Time(0))
            except tf.LookupException:
                print "tf lookup not available yet, retrying"

        transform_matrix_odom_velodyne = self.tf_transformer.fromTranslationRotation(
            trans, qua)
        transform_matrix_lidar_camera = self.tf_transformer.fromTranslationRotation(
            self.translation_lidar_camera, self.quaternion_lidar_camera)

        # ================================================
        #   Find totem color and update the totem_list
        # ================================================

        for totem in self.totem_list:

            # Skip totems beyond the distance threshold
            if self.distance(self.wamv_position, totem) >= 15:
                pass
            else:
                # Transform pose from odom to velodyne
                totem_position = np.zeros((4, 1))
                totem_position[0, 0] = totem.x  # x
                totem_position[1, 0] = totem.y  # y
                totem_position[2, 0] = totem.z  # z
                totem_position[3, 0] = 1
                lidar_totem_position = np.matmul(
                    transform_matrix_odom_velodyne, totem_position)
                # Transform pose from velodyne to camera
                camera_totem_position = np.matmul(
                    transform_matrix_lidar_camera, lidar_totem_position)

                totem_image_x = self.camera_matrix[0] * (
                    -camera_totem_position[1, 0] /
                    camera_totem_position[0, 0]) + self.camera_matrix[2]
                totem_image_y = self.camera_matrix[4] * (
                    -camera_totem_position[2, 0] /
                    camera_totem_position[0, 0]) + self.camera_matrix[5]
                totem_image_x = int(totem_image_x)
                totem_image_y = int(totem_image_y)


                if camera_totem_position[0, 0] >=0 and totem_image_x >= 140 and \
                    totem_image_y >= 150 and totem_image_y <= self.img.shape[0]-200 and totem_image_x <= self.img.shape[1]-140 :
                    #img_roi = self.img[totem_image_y-70:totem_image_y+50, totem_image_x-90:totem_image_x+90].copy()
                    img_roi = self.img[totem_image_y - 140:totem_image_y + 70,
                                       totem_image_x - 120:totem_image_x +
                                       120].copy()

                    color, img_mask = self.color_detect.get_color_and_image_mask(
                        img_roi)

                    if len(totem.color_record) < 10:
                        msg_img_mask = Image()
                        msg_img_mask = self.cv_bridge.cv2_to_imgmsg(
                            img_mask, "bgr8")
                        self.pub_image_mask.publish(msg_img_mask)

                        totem.color_record.append(color)
                        max_color = max(totem.color_record,
                                        key=totem.color_record.count)
                        totem.color = max_color

                        color_record_remove_none = list(totem.color_record)
                        color_record_remove_none = filter(
                            lambda a: a != "NONE", color_record_remove_none)
                        second_max_color = "NONE"

                        if len(color_record_remove_none) > 0:
                            second_max_color = max(
                                color_record_remove_none,
                                key=color_record_remove_none.count)
                        if max_color is "NONE":
                            totem.color = second_max_color
                        if len(totem.color_record
                               ) is 10 and totem.color == "NONE":
                            totem.color = "White"
                        print totem.color

                    msg_img_roi = Image()
                    msg_img_roi_compressed = CompressedImage()
                    msg_img_roi = self.cv_bridge.cv2_to_imgmsg(img_roi, "bgr8")
                    msg_img_roi_compressed = self.cv_bridge.cv2_to_compressed_imgmsg(
                        img_roi)
                    self.pub_image_roi.publish(msg_img_roi)
                    self.pub_image_roi_compressed.publish(
                        msg_img_roi_compressed)

        if len(self.totem_list) != 0:
            self.draw_totem()
Beispiel #58
0
    def imageCallback(self, msg):
        '''!
            Receives the image, segments it, and publishes the masks.
        '''

        img = np.array(msg.data, dtype=np.uint8)
        img = np.array(np.split(img, msg.height))
        img = np.array(np.split(img, msg.width, axis=1))

        img = np.rot90(img)

        img = tf.keras.preprocessing.image.array_to_img(img)
        image_array = tf.keras.preprocessing.image.img_to_array(img)

        result = self.bodypix_model.predict_single(image_array)
        mask = result.get_mask(threshold=0.75)
        colored_mask = result.get_colored_part_mask(mask)

        mask = mask.numpy().astype(np.uint8)
        colored_mask = colored_mask.astype(np.uint8)

        maskMsg = Image()
        # Assigning to _data bypasses the setter; handing over bytes instead
        # of a Python list would avoid an extra copy.
        maskMsg._data = mask.flatten().tolist()
        maskMsg.height = mask.shape[0]
        maskMsg.width = mask.shape[1]
        maskMsg.encoding = "8UC1"
        maskMsg.is_bigendian = 0
        maskMsg.step = maskMsg.width

        colMsg = Image()
        colMsg._data = colored_mask.flatten().tolist()
        colMsg.height = colored_mask.shape[0]
        colMsg.width = colored_mask.shape[1]
        colMsg.encoding = "8UC3"
        colMsg.is_bigendian = 0
        colMsg.step = colMsg.width * 3

        maskMsg.header = msg.header
        colMsg.header = msg.header

        self.pubMask.publish(maskMsg)
        self.colMask.publish(colMsg)
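The split/split/rot90 sequence at the top of this callback rebuilds an image from the flat byte buffer; for row-major 3-channel data it amounts to a reshape followed by the vertical flip that `rot90` introduces. A sketch of the usual one-liner (assuming an 8-bit, 3-channel encoding):

img = np.frombuffer(bytes(msg.data), dtype=np.uint8)
img = img.reshape(msg.height, msg.width, -1)[::-1]  # flip matches the rot90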
	def __init__(self):
		'''
		This class transmits data from the MORSE node to a transparent node
		used in the ROS airship stack; you will have to modify it.
		'''
		# Get the ~private namespace parameters from command line or launch file.
		init_message = rospy.get_param('~message', 'hello')
		self.rate = float(rospy.get_param('~rate', '30.0'))
		self.rate_before_sleep = self.rate
		self.topic_root = rospy.get_param('~topic', 'trejectory_planner')
		self.topic_hardcom = rospy.get_param('~topic_hardcom', 'hardcom')
		self.topic_camcom = rospy.get_param('~topic_camcom', 'cameracom')
		self.topic_command = rospy.get_param('~topic_command', 'command')
		self.topic_odometry = rospy.get_param('~topic_odometry', 'odometry')
		rospy.loginfo("airship/" + self.topic_root + "/rate = " + str(self.rate) + "Hz")

		#Defining published messages
		'''
		RANGE FIELDS:
		Header header
		uint8 ULTRASOUND=0
		uint8 INFRARED=1
		uint8 radiation_type
		float32 field_of_view
		float32 min_range
		float32 max_range
		float32 range
		'''
		self.range = Range()

		'''
		IMU FIELDS:
		Header header

		geometry_msgs/Quaternion orientation
		float64[9] orientation_covariance # Row major about x, y, z axes

		geometry_msgs/Vector3 angular_velocity
		float64[9] angular_velocity_covariance # Row major about x, y, z axes

		geometry_msgs/Vector3 linear_acceleration
		float64[9] linear_acceleration_covariance # Row major x, y z 
		'''
		self.imu = Imu()

		'''
		ODOMETRY FIELDS
		std_msgs/Header header
		  uint32 seq
		  time stamp
		  string frame_id
		string child_frame_id
		geometry_msgs/PoseWithCovariance pose
		  geometry_msgs/Pose pose
		    geometry_msgs/Point position
		      float64 x
		      float64 y
		      float64 z
		    geometry_msgs/Quaternion orientation
		      float64 x
		      float64 y
		      float64 z
		      float64 w
		  float64[36] covariance
		geometry_msgs/TwistWithCovariance twist
		  geometry_msgs/Twist twist
		    geometry_msgs/Vector3 linear
		      float64 x
		      float64 y
		      float64 z
		    geometry_msgs/Vector3 angular
		      float64 x
		      float64 y
		      float64 z
		  float64[36] covariance
		'''
		self.estimated_pose = Odometry()
		self.image = Image()
		self.viz_img = Image()

		'''
		POSE FIELDS:
		# A representation of pose in free space, composed of position and orientation.

		Point position
		POINT FIELDS:
		# This contains the position of a point in free space
			float64 x
			float64 y
			float64 z

		Quaternion orientation
		QUATERNION FIELDS
			float64 x
			float64 y
			float64 z
			float64 w
		'''
		#self.wish_pose = Pose()

		'''
		Vector3 FIELDS:

		float64 x
		float64 y
		float64 z
		'''
		self.wish_command = Vector3() # x : speed command ; y : elevation command ; z : rudder command 

		self.is_line_tracking = False
		self.is_trajectory_tracking = False
		self.is_sleep_state = False

		self.bridge = CvBridge()

		#Defining the subscriber
		rospy.Subscriber(self.topic_hardcom+"/LidarRange", Range, self.call_range)
		rospy.Subscriber(self.topic_hardcom+"/InertialData", Imu, self.call_imu)
		rospy.Subscriber(self.topic_command+"/isSleep", Bool, self.call_sleep)
		rospy.Subscriber("cameracom/CameraImg", Image, self.call_image)
		rospy.Subscriber("pose2odom/EstimatedPose", Odometry, self.call_estimated_pose)

		#Defining all the publisher
		self.wish_command_pub = rospy.Publisher(self.topic_root+"/WishCommand", Vector3, queue_size=10)
		self.vizualisation_pub = rospy.Publisher("planner/linetrack_viz", Image, queue_size=10)

		#Defining service to set tracking mode
		self.proceed_line_tracking = rospy.Service(self.topic_root+"/proceed_line_tracking", Trigger, self.line_track)
		self.proceed_point_tracking = rospy.Service(self.topic_root+"/proceed_point_tracking", TrigPointTracking, self.point_track)

		#Defining service to set publish rate
		self.set_rate_service = rospy.Service(self.topic_root+"/set_rate", SetRate, self.set_rate)
		self.linetracker = lt.LineTracking()

		while not rospy.is_shutdown():
			if not self.is_sleep_state:
				#Publish the wishpose
				
				if self.is_line_tracking:
					(r, p, y) = tf.transformations.euler_from_quaternion([self.estimated_pose.pose.pose.orientation.x, self.estimated_pose.pose.pose.orientation.y, 
																	self.estimated_pose.pose.pose.orientation.z, self.estimated_pose.pose.pose.orientation.w])

					img = self.bridge.imgmsg_to_cv2(self.image, "bgr8")
					unit_dir, unit_line_dir, sum_vect, frame, edge = self.linetracker.update(img, [r,p,y], self.estimated_pose.pose.pose.position.z)
					self.viz_img = self.bridge.cv2_to_imgmsg(frame, encoding="bgr8")

					unit = sum_vect

					# np.arctan2(unit[1], unit[0]) would handle all quadrants;
					# plain arctan assumes unit[0] > 0
					yaw = (np.arctan(unit[1]/unit[0])) + y

					self.wish_command.x = 0.5
					self.wish_command.y = 0.
					self.wish_command.z = yaw
					self.vizualisation_pub.publish(self.viz_img)
					self.wish_command_pub.publish(self.wish_command)

				# publish at the configured rate

				if self.rate:
					rospy.sleep(1/self.rate)
				else:
					rospy.sleep(1.0)

		self.publish_onShutdown()		
Beispiel #60
-1
    def default(self, ci='unused'):
        """ Publish the data of the Camera as a ROS Image message. """
        if not self.component_instance.capturing:
            return # press [Space] key to enable capturing

        image_local = self.data['image']

        image = Image()
        image.header = self.get_ros_header()
        image.header.frame_id += '/base_image'
        image.height = self.component_instance.image_height
        image.width = self.component_instance.image_width
        image.encoding = 'rgba8'
        image.step = image.width * 4

        # VideoTexture.ImageRender implements the buffer interface
        image.data = bytes(image_local)

        # sensor_msgs/CameraInfo [ http://ros.org/wiki/rviz/DisplayTypes/Camera ]
        # fill these 3 parameters to get a correct image with a stereo camera
        Tx = 0
        Ty = 0
        R = [1, 0, 0, 0, 1, 0, 0, 0, 1]

        intrinsic = self.data['intrinsic_matrix']

        camera_info = CameraInfo()
        camera_info.header = image.header
        camera_info.height = image.height
        camera_info.width = image.width
        camera_info.distortion_model = 'plumb_bob'
        camera_info.K = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2],
                         intrinsic[1][0], intrinsic[1][1], intrinsic[1][2],
                         intrinsic[2][0], intrinsic[2][1], intrinsic[2][2]]
        camera_info.R = R
        camera_info.P = [intrinsic[0][0], intrinsic[0][1], intrinsic[0][2], Tx,
                         intrinsic[1][0], intrinsic[1][1], intrinsic[1][2], Ty,
                         intrinsic[2][0], intrinsic[2][1], intrinsic[2][2], 0]

        self.publish(image)
        self.topic_camera_info.publish(camera_info)