Example #1
    def hough_it(self, n_ball, iteration):
        # create gray scale image of balls
        gray_image = cv.CreateImage((self.width, self.height), 8, 1)
        cv.CvtColor(self.cv_image, gray_image, cv.CV_BGR2GRAY)

        # create gray scale array of balls
        gray_array = self.cv2array(gray_image)

        # find Hough circles
        circles = cv2.HoughCircles(gray_array, cv.CV_HOUGH_GRADIENT, 1, 40, param1=50,  \
                  param2=self.hough_accumulator, minRadius=self.hough_min_radius,       \
                  maxRadius=self.hough_max_radius)

        # Check for at least one ball found
        if circles is None:
            # display no balls found message on head display
            self.splash_screen("no balls", "found")
            # no point in continuing so exit with error message
            sys.exit("ERROR - hough_it - No golf balls found")

        circles = numpy.uint16(numpy.around(circles))

        ball_data = {}
        n_balls = 0

        circle_array = numpy.asarray(self.cv_image)

        # check if golf ball is in ball tray
        for i in circles[0, :]:
            # convert to baxter coordinates
            ball = self.pixel_to_baxter((i[0], i[1]), self.tray_distance)

            if self.is_near_ball_tray(ball):
                # draw the outer circle in red
                cv2.circle(circle_array, (i[0], i[1]), i[2], (0, 0, 255), 2)
                # draw the center of the circle in red
                cv2.circle(circle_array, (i[0], i[1]), 2, (0, 0, 255), 3)
            elif i[1] > 800:
                # draw the outer circle in red
                cv2.circle(circle_array, (i[0], i[1]), i[2], (0, 0, 255), 2)
                # draw the center of the circle in red
                cv2.circle(circle_array, (i[0], i[1]), 2, (0, 0, 255), 3)
            else:
                # draw the outer circle in green
                cv2.circle(circle_array, (i[0], i[1]), i[2], (0, 255, 0), 2)
                # draw the center of the circle in green
                cv2.circle(circle_array, (i[0], i[1]), 2, (0, 255, 0), 3)

                ball_data[n_balls] = (i[0], i[1], i[2])
                n_balls += 1

        circle_image = cv.fromarray(circle_array)

        cv.ShowImage("Hough Circle", circle_image)

        # 3ms wait
        cv.WaitKey(3)

        # display image on head monitor
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 1)
        position = (30, 60)
        s = "Searching for golf balls"
        cv.PutText(circle_image, s, position, font, self.white)
        msg = cv_bridge.CvBridge().cv_to_imgmsg(circle_image, encoding="bgr8")
        self.pub.publish(msg)

        if self.save_images:
            # save image of Hough circles on raw image
            file_name = self.image_dir                                                 \
                      + "hough_circle_" + str(n_ball) + "_" + str(iteration) + ".jpg"
            cv.SaveImage(file_name, circle_image)

        # Check for at least one ball found
        if n_balls == 0:  # no balls found
            # display no balls found message on head display
            self.splash_screen("no balls", "found")
            # no balls found, so there is no point in continuing; exit with an error message
            sys.exit("ERROR - hough_it - No golf balls found")

        # select next ball and find its position
        next_ball = self.find_next_golf_ball(ball_data, iteration)

        # find best gripper angle to avoid touching neighbouring ball
        angle = self.find_gripper_angle(next_ball, ball_data)

        # return next golf ball position and pickup angle
        return next_ball, angle
Example #2
    def _cb(self, img_msg, pda_msg):
        br = cv_bridge.CvBridge()
        img = br.imgmsg_to_cv2(img_msg, desired_encoding="bgr8")
        curGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        people_poses = pda_msg.poses

        wrist_pose = None
        elbow_pose = None
        neck_pose = None

        has_waving_person = False

        print len(people_poses)

        if self.preGray is not None:
            for person_pose in people_poses:
                for limb_prefix in ['R', 'L']:
                    try:
                        wrist_index = person_pose.limb_names.index(
                            limb_prefix + "Wrist")
                        elbow_index = person_pose.limb_names.index(
                            limb_prefix + "Elbow")
                        neck_index = person_pose.limb_names.index("Neck")
                    except ValueError:
                        continue

                    if not np.all(
                            np.array(person_pose.confidences)[[
                                wrist_index, elbow_index, neck_index
                            ]] > self.arms_score_threshold):
                        continue

                    wrist_pose = person_pose.poses[wrist_index]
                    elbow_pose = person_pose.poses[elbow_index]
                    neck_pose = person_pose.poses[neck_index]

                    if (elbow_pose.position.y > wrist_pose.position.y):

                        wrist_elbow_dist = int(
                            ((wrist_pose.position.x - elbow_pose.position.x)**2
                             + (wrist_pose.position.y - elbow_pose.position.y)
                             **2)**0.5)
                        y_tl = int(
                            max(0, elbow_pose.position.y -
                                wrist_elbow_dist * 1.5))
                        x_tl = int(
                            max(0,
                                elbow_pose.position.x - wrist_elbow_dist / 2))
                        width = int(
                            min(480 - x_tl, 640 - y_tl,
                                wrist_elbow_dist * 1.5))
                        height = width

                        preRoi = self.preGray[x_tl:x_tl + width,
                                              y_tl:y_tl + height]
                        curRoi = curGray[x_tl:x_tl + width, y_tl:y_tl + height]
                        flow = cv2.calcOpticalFlowFarneback(
                            preRoi, curRoi, 0.5, 3, 15, 3, 5, 1.2, 0)

                        # print preRoi.shape

                        has_waving_person = True
                        cv2.rectangle(img, (x_tl, y_tl),
                                      (x_tl + width, y_tl + height),
                                      (0, 0, 255), 10)
                        break

                if has_waving_person:
                    print "found out"
                    break

        self.preGray = curGray

        debug_img_msg = br.cv2_to_imgmsg(img, encoding="bgr8")
        debug_img_msg.header = img_msg.header

        self.img_pub_.publish(debug_img_msg)
Example #3
    def __init__(self):
        # Initialize main node for publishing
        rospy.init_node('MainAction')
        rospy.loginfo('... Initializing MainAction node')

        # Enable baxter if it isn't already enabled
        baxter_interface.RobotEnable(CHECK_VERSION).state().enabled
        baxter_interface.RobotEnable(CHECK_VERSION).enable()

        # Create the main publisher for activating the joint movements
        self.pub = rospy.Publisher('/robot/limb/left/joint_command',
                                   JointCommand,
                                   queue_size=10)

        # Create Class to control Baxter's Joints
        self.command = JointCommand()
        self.rate = rospy.Rate(70)

        # Variable to show every print statement for testing
        SHOW_RESULTS = False

        # Define screen pixels for Baxter's image (face LCD screen)
        h = 600
        w = 1024

        # Load image for the Baxter's screen
        img1 = cv2.imread("Tejada.png")

        # Resize the image to get to the maximum possible screen size
        img1 = cv2.resize(img1, (w, h), interpolation=cv2.INTER_CUBIC)

        # Create publisher for the Image that will be loaded to Baxter
        pub = rospy.Publisher('/robot/xdisplay',
                              Image,
                              latch=True,
                              queue_size=1)

        # Define message that will be loaded to the Image-publisher
        msg1 = cv_bridge.CvBridge().cv2_to_imgmsg(img1, encoding="bgr8")

        # Publish the desired message that contains the image
        pub.publish(msg1)

        # Get main vector for the trajectory
        T1 = trajectory_A.TrajectoryA(5)
        print("\n... Processing trajectory 1 ...\n")
        vector_x = T1.vector_x
        vector_y = T1.vector_y
        vector_z = T1.vector_z
        vector_angle_x = T1.vector_angle_x
        vector_angle_y = T1.vector_angle_y
        vector_angle_z = T1.vector_angle_z
        print("\n... Done processing trajectory 1 ...\n")

        # TM_test = transformation.Transformation(vector_angle_x[0], vector_angle_y[0],
        #             vector_angle_z[0], [vector_x[0], vector_y[0], vector_z[0]])
        # print(TM_test.TM)
        # THETAS = inverse_kinematics.inverse_kinematics_baxter(TM_test.TM, "left")
        # print(THETAS)

        # Get each vector of theta_i for all points
        self.THETA_1 = []
        self.THETA_2 = []
        self.THETA_4 = []
        self.THETA_5 = []
        self.THETA_6 = []
        self.THETA_7 = []

        # Generate main trajectories finding each angle of the joints with IK
        for i in range(len(vector_x)):
            # Get each transformation matrix for all cartesian points given
            TM_i = transformation.Transformation(
                vector_angle_x[i], vector_angle_y[i], vector_angle_z[i],
                [vector_x[i], vector_y[i], vector_z[i]])

            # Get each THETA_i for each point with the inverse-kinematics
            THETAS_i = inverse_kinematics.inverse_kinematics_baxter(
                TM_i.TM, "left")

            # Create all vectors of THETA_i for all necessary points
            self.THETA_1.append(float(THETAS_i[0]))
            self.THETA_2.append(float(THETAS_i[1]))
            self.THETA_4.append(float(THETAS_i[2]))
            self.THETA_5.append(float(THETAS_i[3]))
            self.THETA_6.append(float(THETAS_i[4]))
            self.THETA_7.append(float(THETAS_i[5]))

        if SHOW_RESULTS == True:
            print("\n THETA_1: \n", np.rad2deg(self.THETA_1))
            print("\n max(THETA_1) =", max(np.rad2deg(self.THETA_1)))
            print("\n min(THETA_1) =", min(np.rad2deg(self.THETA_1)))
            print("\n THETA_2: \n", np.rad2deg(self.THETA_2))
            print("\n max(THETA_2) =", max(np.rad2deg(self.THETA_2)))
            print("\n min(THETA_2) =", min(np.rad2deg(self.THETA_2)))
            print("\n THETA_4: \n", np.rad2deg(self.THETA_4))
            print("\n max(THETA_4) =", max(np.rad2deg(self.THETA_4)))
            print("\n min(THETA_4) =", min(np.rad2deg(self.THETA_4)))
            print("\n THETA_5: \n", np.rad2deg(self.THETA_5))
            print("\n max(THETA_5) =", max(np.rad2deg(self.THETA_5)))
            print("\n min(THETA_5) =", min(np.rad2deg(self.THETA_5)))
            print("\n THETA_6: \n", np.rad2deg(self.THETA_6))
            print("\n max(THETA_6) =", max(np.rad2deg(self.THETA_6)))
            print("\n min(THETA_6) =", min(np.rad2deg(self.THETA_6)))
            print("\n THETA_7: \n", np.rad2deg(self.THETA_7))
            print("\n max(THETA_7) =", max(np.rad2deg(self.THETA_7)))
            print("\n min(THETA_7) =", min(np.rad2deg(self.THETA_7)))
Example #4
def main(num_of_backsteps=1,dropout=1):

    # define the list of topics that you want to extract
    ros_topics = [
                # the duckiebot name can change from one bag file to the other, so define
                # the topics WITHOUT the duckiebot name in the beginning
                "/camera_node/image/compressed",
                "/lane_controller_node/car_cmd"
                ]

    # define the bags_directory in order to extract the data
    bags_directory = os.path.join(os.getcwd(), "bag_files")

    # define data_directory
    data_directory = 'data'
    if not os.path.exists(data_directory):
        os.makedirs(data_directory)

    # define train and test directories inside the data directory
    test_dir = os.path.join(data_directory, "test")
    train_dir = os.path.join(data_directory, "train")
    if not os.path.exists(test_dir):
        os.mkdir(test_dir)
    if not os.path.exists(train_dir):
        os.mkdir(train_dir)

    cvbridge = cv_bridge.CvBridge()

    # create a dataframe to store the data for all bag files
    # df_all = pd.DataFrame()

    first_time = True

    for file in os.listdir(bags_directory):
        if not file.endswith(".bag"):
            continue

        # extract bag_ID to include it in the data for potential future use (useful in case of weird data distributions
        # or odd final results, since you will then be able to associate the data with the bag files)
        bag_ID = file.partition(".bag")[0]

        # extract the duckiebot name to complete the definition of the nodes
        duckiebot_name = file.partition("_")[2].partition(".bag")[0]

        # complete the topics names with the duckiebot name in the beginning
        ros_topics_temp = copy(ros_topics)
        for num, topic in enumerate(ros_topics_temp):
            ros_topics_temp[num] = "/" + duckiebot_name + topic

        # define absolute path of the bag_file
        abs_path = os.path.abspath(os.path.join(bags_directory, file))

        print("Extract data for {} file.".format(file))
        try:
            msgs = extract_messages(abs_path, ros_topics_temp)
        except rosbag.bag.ROSBagException:
            print("Failed to open {}".format(abs_path))
            continue

                         ######## The following part is implementation specific ########

        # The composition of the ROS messages differs between topics (e.g. different field names), and different
        # tools are needed to handle the extracted data (e.g. cv_bridge for images). As a result, the following
        # part of the script can be used as a basis to extract the data, but IT HAS TO BE MODIFIED based on your topics.

        # An easy way to find the structure of your ROS messages: print dir(msgs[name_of_topic])
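        # For illustration only (a hypothetical inspection, not part of the pipeline), the structure of the topics
        # extracted above could be checked like this:
        #   name_of_topic = "/" + duckiebot_name + "/camera_node/image/compressed"
        #   print(dir(msgs[name_of_topic]))          # attributes of the extracted topic object
        #   print(msgs[name_of_topic].messages[0])   # first recorded message together with its timestamp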


        # extract the images and car_cmds messages
        ext_images = msgs["/" + duckiebot_name + "/camera_node/image/compressed"].messages
        ext_car_cmds = msgs["/" + duckiebot_name + "/lane_controller_node/car_cmd"].messages

        # create dataframe with the images and the images' timestamps
        for num, img in enumerate(ext_images):

            # get the rgb image
            img = cvbridge.compressed_imgmsg_to_cv2(img.message)
            img = image_preprocessing(img)  # -> each image is of dimensions (1, 48x96x3=13824)

            # hack to get the timestamp of each image in <float 'secs.nsecs'> format instead of <int 'rospy.rostime.Time'>
            temp_timestamp = ext_images[num].timestamp
            img_timestamp = temp_timestamp.secs + temp_timestamp.nsecs *10 ** -len(str(temp_timestamp.nsecs))

            temp_df = pd.DataFrame({
                'img': [img],
                'img_timestamp': [img_timestamp]
            })

            if num == 0:
                df_imgs = temp_df.copy()
            else:
                df_imgs = df_imgs.append(temp_df, ignore_index=True)


        # create dataframe with the car_cmds and the car_cmds' timestamps
        for num, cmd in enumerate(ext_car_cmds):

            # read wheel commands messages
            cmd_msg = cmd.message

            # hack to get the timestamp of each image in <float 'secs.nsecs'> format instead of <int 'rospy.rostime.Time'>
            temp_timestamp = ext_car_cmds[num].timestamp
            vel_timestamp = temp_timestamp.secs + temp_timestamp.nsecs * 10 ** -len(str(temp_timestamp.nsecs))

            temp_df = pd.DataFrame({
                'vel_timestamp': [vel_timestamp],
                'vel_omega': [cmd_msg.omega],
                'vel_v': [cmd_msg.v]
            })

            if num == 0:
                df_cmds = temp_df.copy()
            else:
                df_cmds = df_cmds.append(temp_df, ignore_index=True)

        # synchronize data
        print("Starting synchronization of data for {} file.".format(file))

        temp_synch_data, temp_synch_imgs = synchronize_data(df_imgs, df_cmds, bag_ID)
        # print temp_synch_data.shape, temp_synch_imgs.shape

        # backstepping data preparation
        if num_of_backsteps > 1:
            temp_synch_imgs,temp_synch_data = backstepping_prep(temp_synch_imgs,temp_synch_data,num_of_backsteps,dropout)

        if first_time:
            synch_data = copy(temp_synch_data)
            synch_imgs = copy(temp_synch_imgs)
            first_time = False

        else:
            synch_data = np.vstack((synch_data, temp_synch_data))
            synch_imgs = np.vstack((synch_imgs, temp_synch_imgs))

        print("\nShape of total data: {} , shape of total images: {}\n".format(synch_data.shape, synch_imgs.shape))

    print("Synchronization of all data is finished.\n")

    # define size of train dataset
    train_size = int(0.9 * synch_data.shape[0])

    # create train dataframe
    df_data_train = pd.DataFrame({
        'img_timestamp': synch_data[:train_size, 0],
        'vel_timestamp': synch_data[:train_size, 1],
        'vel_v': synch_data[:train_size, 2],
        'vel_omega': synch_data[:train_size, 3],
        'bag_ID': synch_data[:train_size, 4],
    })

    # create train dataframe for images in order to save them in the same .h5 file with the rest train data
    df_img_train = pd.DataFrame(data=synch_imgs[:train_size,:],index=range(train_size))

    # create test dataframe
    df_data_test = pd.DataFrame({
        'img_timestamp': synch_data[train_size:, 0],
        'vel_timestamp': synch_data[train_size:, 1],
        'vel_v': synch_data[train_size:, 2],
        'vel_omega': synch_data[train_size:, 3],
        'bag_ID': synch_data[train_size:, 4],
    })

    # create test dataframe for images in order to save them in the same .h5 file with the rest test data
    df_img_test = pd.DataFrame(data=synch_imgs[train_size:,:],index=range(train_size,synch_data.shape[0]))
    #pd.DataFrame({
        #'img': synch_imgs[train_size:,:]
    #},index=range(train_size,synch_data.shape[0]))

    # save train and test datasets to .h5 files

    # ATTENTION 1 !!
    # If the datasets become too large, you could face memory errors on laptops.
    # If you face memory errors while saving the following files, split the data into multiple .h5 files.

    # ATTENTION 2 !!
    # The .h5 files are tricky and require special attention. In these files you save compressed objects, and you can
    # have more than one object saved in the same file. If, for example, we have two different dataframes df1 and df2,
    # then df1.to_hdf('file.h5', key='df1') and df2.to_hdf('file.h5', key='df2') will result in both df1 and df2 being
    # saved in the 'file.h5' file, each under its own key. However, if we save the same dataframe to the same .h5 file
    # with the same key, the file will contain the same information twice as different objects, so its size doubles
    # for no reason and without any warning. As a result, since the key does not change here, we check whether the .h5
    # file exists before saving the new data, and if it does we first remove the previous file and then save the new data.
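    # For illustration only (hypothetical file name), two dataframes can share one .h5 file under different keys
    # and be read back independently:
    #   df1.to_hdf('example.h5', key='df1')
    #   df2.to_hdf('example.h5', key='df2')
    #   df1_back = pd.read_hdf('example.h5', key='df1')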

    # define the names of the train and test .h5 files
    train_set_name = os.path.join(train_dir, 'train_set.h5')
    test_set_name = os.path.join(test_dir, 'test_set.h5')

    # check if these two files exist in the data directory and if yes remove them before saving the new files
    if os.path.isfile(train_set_name):
        os.remove(train_set_name)

    if os.path.isfile(test_set_name):
        os.remove(test_set_name)

    # df_all_train.to_hdf(train_set_name, 'table')
    df_data_train.to_hdf(train_set_name, key='data')
    df_img_train.to_hdf(train_set_name, key='images')

    df_data_train.info()
    df_img_train.info()

    # df_all_test.to_hdf(test_set_name, 'table')
    df_data_test.to_hdf(test_set_name, key='data')
    df_img_test.to_hdf(test_set_name, key='images')

    print("\nThe total {} data were split into {} training and {} test datasets and saved in {} "
          "directory.".format(synch_data.shape[0], df_data_train.shape[0], df_data_test.shape[0], data_directory))
Example #5
 def __init__(self):
     self.bridge = cv_bridge.CvBridge()
     self.image_sub = rospy.Subscriber('/camera/color/image_raw', Image,
                                       self.image_cb)
     self.pixel_pub = rospy.Publisher('obj_points', pixels, queue_size=1)
     self.pixels = pixels()
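
A minimal sketch (not part of the original snippet) of the image_cb callback referenced above, assuming it only converts the incoming message with the bridge; how the custom pixels message gets filled is omitted:

 def image_cb(self, msg):
     # convert the ROS Image message to an OpenCV BGR image
     cv_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
     # ... locate the object pixels in cv_image and fill self.pixels here ...
     self.pixel_pub.publish(self.pixels)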
Example #6
    def publishHazmap(self):
        hazRaw = self.polPack['hazmap']
        #Invert to match ROS convention of white=traversable

        self.hazPub.publish(cv_bridge.CvBridge().cv2_to_imgmsg(
            cv2.bitwise_not(hazRaw), encoding='mono8'))
Example #7
 def camera_callback(self, data, camera_name):
     # Convert image from a ROS image message to a CV image
     try:
         self.cv_image = cv_bridge.CvBridge().imgmsg_to_cv(data, "bgr8")
     except cv_bridge.CvBridgeError, e:
         print e
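
For reference (not part of the original snippet), the same conversion with the current cv_bridge API returns a NumPy array instead of the legacy CvMat:

         self.cv_image = cv_bridge.CvBridge().imgmsg_to_cv2(data, desired_encoding="bgr8")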
Example #8
def display_image():
    img = cv2.imread(FACE_IMAGE_PATH)
    msg = cv_bridge.CvBridge().cv2_to_imgmsg(img, encoding="bgr8")
    pub = rospy.Publisher('/robot/xdisplay', Image, queue_size=100, latch=True)
    pub.publish(msg)
Example #9
#!/usr/bin/env python

import cv2
import rospy
import rospkg
import cv_bridge
from sensor_msgs.msg import Image

if __name__ == '__main__':

    rospy.init_node("display_robotman")

    rospack = rospkg.RosPack()
    image_path = rospack.get_path('robotman_gazebo') + '/images/'
    img = cv2.imread(image_path + 'screen.png')

    msg = cv_bridge.CvBridge().cv2_to_imgmsg(img, encoding="bgr8")
    pub = rospy.Publisher('/screen/body/image',
                          Image,
                          latch=True,
                          queue_size=1)
    rate = rospy.Rate(1)

    while not rospy.is_shutdown():

        pub.publish(msg)
        rate.sleep()
Example #10
 def shape_cam_callback(self, msg):
     bridge = cv_bridge.CvBridge()
     image = bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
     self.hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
Example #11
 def __init__(self):
     self.bridge = cv_bridge.CvBridge()
     cv2.namedWindow("window", 1)
     self.image_sub = rospy.Subscriber('camera/rgb/image_raw', Image,
                                       self.image_callback)
Example #12
 def __init__(self):
     self.bridge = cv_bridge.CvBridge()
     self.image_sub = rospy.Subscriber('/ur5/usbcam/image_raw', Image,
                                       self.image_callback)
Example #13
    def _callback(self, img_msg, mask_msg):
        bridge = cv_bridge.CvBridge()
        bgr_img = bridge.imgmsg_to_cv2(img_msg, desired_encoding='bgr8')
        mask_img = bridge.imgmsg_to_cv2(mask_msg, desired_encoding='mono8')
        if mask_img.size < 1:
            logwarn_throttle(10, 'Too small sized image')
            return
        logwarn_throttle(10, '[FCNMaskForLabelNames] >> Start Processing <<')
        if mask_img.ndim == 3 and mask_img.shape[2] == 1:
            mask_img = mask_img.reshape(mask_img.shape[:2])
        if mask_img.shape != bgr_img.shape[:2]:
            jsk_logwarn('Size of mask and color image is different. '
                        'Resizing.. mask {0} to {1}'.format(
                            mask_img.shape, bgr_img.shape[:2]))
            mask_img = resize(mask_img, bgr_img.shape[:2],
                              preserve_range=True).astype(np.uint8)

        blob = bgr_img - self.mean_bgr
        blob = blob.transpose((2, 0, 1))

        x_data = np.array([blob], dtype=np.float32)
        if self.gpu != -1:
            x_data = cuda.to_gpu(x_data, device=self.gpu)
        x = Variable(x_data, volatile=True)
        self.model(x)
        pred_datum = cuda.to_cpu(self.model.score.data[0])

        candidate_labels = [
            self.target_names.index(name) for name in self.tote_contents
        ]
        label_pred_in_candidates = pred_datum[candidate_labels].argmax(axis=0)
        label_pred = np.zeros_like(label_pred_in_candidates)
        for idx, label_val in enumerate(candidate_labels):
            label_pred[label_pred_in_candidates == idx] = label_val
        label_pred[mask_img == 0] = 0  # set bg_label

        label_viz = label2rgb(label_pred, bgr_img, bg_label=0)
        label_viz = (label_viz * 255).astype(np.uint8)
        debug_msg = bridge.cv2_to_imgmsg(label_viz, encoding='rgb8')
        debug_msg.header = img_msg.header
        self.pub_debug.publish(debug_msg)

        output_mask = np.ones(mask_img.shape, dtype=np.uint8)
        output_mask *= 255
        for label_val, label_name in enumerate(self.target_names):
            if label_name in self.label_names:
                assert label_name == 'kleenex_paper_towels'
                assert label_val == 21
                label_mask = ((label_pred == label_val) * 255).astype(np.uint8)
                contours, hierarchy = cv2.findContours(
                    label_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                cv2.drawContours(output_mask, contours, -1, 255, -1)
                # output_mask[label_pred == label_val] = False
        # output_mask = output_mask.astype(np.uint8)
        # output_mask[output_mask == 1] = 255
        output_mask[mask_img == 0] = 0
        output_mask_msg = bridge.cv2_to_imgmsg(output_mask, encoding='mono8')
        output_mask_msg.header = img_msg.header
        self.pub.publish(output_mask_msg)
        logwarn_throttle(10, '[FCNMaskForLabelNames] >> Finished processing <<')
Example #14
    def pick_and_place(self):
        n_ball = 0
        while n_ball < 12:  # assume no more than 12 golf balls
            n_ball += 1
            iteration = 0
            angle = 0.0

            # use Hough circles to find balls and select one ball
            next_ball, angle = self.hough_it(n_ball, iteration)

            error = 2 * self.ball_tolerance

            print
            print "Ball number ", n_ball
            print "==============="

            # iterate to find next golf ball
            # if hunting to and fro accept error in position
            while error > self.ball_tolerance and iteration < 10:
                iteration += 1
                next_ball, angle, error = self.golf_ball_iterate(
                    n_ball, iteration, next_ball)

            font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 1)
            position = (30, 60)
            s = "Picking up golf ball"
            cv.PutText(self.cv_image, s, position, font, self.white)
            msg = cv_bridge.CvBridge().cv_to_imgmsg(self.cv_image,
                                                    encoding="bgr8")
            self.pub.publish(msg)

            print "DROPPING BALL ANGLE =", angle * (math.pi / 180)
            if angle != self.yaw:  # if neighbouring ball
                pose = (
                    self.pose[0],  # rotate gripper to avoid hitting neighbour
                    self.pose[1],
                    self.pose[2],
                    self.pose[3],
                    self.pose[4],
                    angle)
                self.baxter_ik_move(self.limb, pose)

                cv.PutText(self.cv_image, s, position, font, self.white)
                msg = cv_bridge.CvBridge().cv_to_imgmsg(self.cv_image,
                                                        encoding="bgr8")
                self.pub.publish(msg)

            # slow down to reduce scattering of neighbouring golf balls
            self.limb_interface.set_joint_position_speed(0.1)

            # move down to pick up ball
            pose = (self.pose[0] + self.cam_x_offset,
                    self.pose[1] + self.cam_y_offset,
                    self.pose[2] + (0.112 - self.distance), self.pose[3],
                    self.pose[4], angle)
            self.baxter_ik_move(self.limb, pose)
            self.print_arm_pose()

            # close the gripper
            self.gripper.close()

            s = "Moving to ball tray"
            cv.PutText(self.cv_image, s, position, font, self.white)
            msg = cv_bridge.CvBridge().cv_to_imgmsg(self.cv_image,
                                                    encoding="bgr8")
            self.pub.publish(msg)

            pose = (self.pose[0], self.pose[1], self.pose[2] + 0.198,
                    self.pose[3], self.pose[4], self.yaw)
            self.baxter_ik_move(self.limb, pose)

            # speed up again
            self.limb_interface.set_joint_position_speed(0.5)

            # display current image on head display
            cv.PutText(self.cv_image, s, position, font, self.white)
            msg = cv_bridge.CvBridge().cv_to_imgmsg(self.cv_image,
                                                    encoding="bgr8")
            self.pub.publish(msg)

            # move down
            pose = (self.ball_tray_place[n_ball - 1][0],
                    self.ball_tray_place[n_ball - 1][1], self.pose[2] - 0.19,
                    self.pose[3], self.pose[4], self.pose[5])
            self.baxter_ik_move(self.limb, pose)

            # display current image on head display
            s = "Placing golf ball in ball tray"
            cv.PutText(self.cv_image, s, position, font, self.white)
            msg = cv_bridge.CvBridge().cv_to_imgmsg(self.cv_image,
                                                    encoding="bgr8")
            self.pub.publish(msg)

            # open the gripper
            self.gripper.open()

            # prepare to look for next ball
            pose = (self.golf_ball_x, self.golf_ball_y, self.golf_ball_z,
                    -1.0 * math.pi, 0.0 * math.pi, 0.0 * math.pi)
            self.baxter_ik_move(self.limb, pose)

        # display all balls found on head display
        self.splash_screen("all balls", "found")

        print "All balls found"
Example #15
 def __init__(self, cam_name, cbk=None, img_fmt="passthrough"):
     self.cam_name, self.img_cbk = cam_name, cbk
     self.bridge = cv_bridge.CvBridge()
     self.img, self.sub, self.img_fmt = None, None, img_fmt
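
A hypothetical sketch (not part of the original snippet) of how such a camera listener might create the subscription it initializes to None and hand converted frames to the user callback; the topic name, the sensor_msgs import, and the callback signature are assumptions:

 def start(self):
     # subscribe lazily to the camera's image topic (topic name assumed)
     self.sub = rospy.Subscriber(self.cam_name + '/image_raw', sensor_msgs.msg.Image,
                                 self._img_callback, queue_size=1)

 def _img_callback(self, msg):
     # convert with the requested encoding and pass the frame to the user callback, if any
     self.img = self.bridge.imgmsg_to_cv2(msg, desired_encoding=self.img_fmt)
     if self.img_cbk is not None:
         self.img_cbk(self.img, msg.header.stamp)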
Example #16
 def sendImage(self, img):
     msg = cv_bridge.CvBridge().cv2_to_imgmsg(img, encoding="bgr8")
     #print(msg)
     self.pub.publish(msg)
Example #17
    def publishDEM(self):

        self.demPub.publish(cv_bridge.CvBridge().cv2_to_imgmsg(
            self.polPack['src'], encoding='64FC1'))
        print 'DEM published'
Example #18
    def callback(self, imgmsg):
        bridge = cv_bridge.CvBridge()
        rgb = bridge.imgmsg_to_cv2(imgmsg, desired_encoding="rgb8")

        masks, labels, confs = self._model.predict(
            [rgb.astype(np.float32).transpose(2, 0, 1)]
        )
        masks = masks[0]
        labels = labels[0]
        confs = confs[0]

        class_ids = labels + 1
        del labels

        if self._blacklist:
            keep = ~np.isin(class_ids, self._blacklist)
            masks = masks[keep]
            class_ids = class_ids[keep]
            confs = confs[keep]

        if len(class_ids) > 0:
            keep = masks.sum(axis=(1, 2)) > 0
            class_ids = class_ids[keep]
            masks = masks[keep]
            confs = confs[keep]

        if len(class_ids) > 0:
            uniq, counts = np.unique(class_ids, return_counts=True)
            keep = []
            for cls_id, count in zip(uniq, counts):
                if count == 1:
                    index = np.argwhere(class_ids == cls_id)[0, 0]
                else:
                    index = np.argmax(confs[class_ids == cls_id])
                keep.append(index)
            class_ids = class_ids[keep]
            masks = masks[keep]
            confs = confs[keep]

        if len(class_ids) > 0:
            sort = np.argsort(confs)
            class_ids = class_ids[sort]
            masks = masks[sort]
            confs = confs[sort]

        instance_ids = np.arange(0, len(masks))
        label_ins = np.full(rgb.shape[:2], -1, dtype=np.int32)
        for ins_id, mask in zip(instance_ids, masks):
            label_ins[mask] = ins_id
        ins_msg = bridge.cv2_to_imgmsg(label_ins)
        ins_msg.header = imgmsg.header
        self._pub_ins.publish(ins_msg)

        instance_ids_active = np.unique(label_ins)
        keep = np.isin(instance_ids, instance_ids_active)
        instance_ids = instance_ids[keep]
        class_ids = class_ids[keep]
        confs = confs[keep]

        cls_msg = ObjectClassArray(header=imgmsg.header)
        for ins_id, cls_id, conf in zip(instance_ids, class_ids, confs):
            cls_msg.classes.append(
                ObjectClass(
                    instance_id=ins_id, class_id=cls_id, confidence=conf,
                )
            )

        self._pub_cls.publish(cls_msg)
Example #19
    def on_predict(self, goal):
        rospy.loginfo('nbv_3d_cnn_predict: action start.')

        if (self.model_type == ''):
            model_file_prefix = ''
            output_channels = 1
        elif (self.model_type == 'flat'):
            model_file_prefix = 'flat'
            output_channels = 1
        elif (self.model_type == 'circular'):
            model_file_prefix = 'circular'
            output_channels = 1
        elif (self.model_type == 'quat'):
            model_file_prefix = 'scoreangle'
            output_channels = 2
        elif (self.model_type == 'autocomplete'):
            model_file_prefix = 'autocomplete'
            output_channels = 1
        else:
            rospy.logfatal('Unknown model type: ' + self.model_type)
            exit(1)

        image_width = goal.empty.width
        image_height = goal.empty.height

        if (goal.frontier.width != image_width
                or goal.frontier.height != image_height):
            rospy.logerr(
                'nbv_3d_cnn_predict: empty image has size %d %d, but frontier image has size %d %d, aborted.'
                % (image_width, image_height, goal.frontier.width,
                   goal.frontier.height))
            self.action_server.set_aborted()
            return

        bridge = cv_bridge.CvBridge()
        empty_image = bridge.imgmsg_to_cv2(goal.empty,
                                           desired_encoding='mono8')
        frontier_image = bridge.imgmsg_to_cv2(goal.frontier,
                                              desired_encoding='mono8')

        np_empty_image = empty_image
        np_frontier_image = frontier_image
        input_image = [np_empty_image, np_frontier_image]
        input_image = np.transpose(input_image, [1, 2, 0])

        input_image = input_image.astype(
            'float32') / 255.0  # convert to zeros and ones

        input_shape = [image_height, image_width]

        if self.model is None or self.last_input_shape != input_shape:
            load_input_shape = [image_height, image_width, 2]
            rospy.loginfo(
                'nbv_3d_cnn_predict: last input shape does not match, reloading model (type "%s").'
                % self.model_type)
            #model = nbv_2d_model.get_2d_model(self.sensor_range_voxels, load_input_shape, 2)

            if (self.model_type == ''):
                model = nbv_2d_model.get_2d_model(self.sensor_range_voxels,
                                                  load_input_shape,
                                                  self.sub_image_expand_pow)
            elif (self.model_type == 'flat'):
                model = nbv_2d_model.get_flat_2d_model(
                    self.sensor_range_voxels, load_input_shape,
                    self.sub_image_expand_pow)
            elif (self.model_type == 'quat'):
                model = nbv_2d_model.get_quat_2d_model(
                    self.sensor_range_voxels, load_input_shape)
            elif (self.model_type == 'autocomplete'):
                model = nbv_2d_model.get_autocomplete_2d_model(
                    self.sensor_range_voxels, load_input_shape)
            elif (self.model_type == 'circular'):
                model = nbv_2d_model.get_circular_2d_model(
                    self.sensor_range_voxels, load_input_shape,
                    self.sub_image_expand_pow)

            model.summary()
            model.load_weights(self.checkpoint_file)

            self.model = model
            self.last_input_shape = input_shape
        else:
            model = self.model

        rospy.loginfo('nbv_3d_cnn_predict: predicting.')
        prediction = model.predict(np.array([
            input_image,
        ]))

        rospy.loginfo('nbv_3d_cnn_predict: sending result.')
        prediction = prediction[0]

        if (output_channels == 1):
            prediction = np.transpose(prediction, [2, 0, 1])
            prediction = prediction[0]
        elif (output_channels == 2):
            prediction = np.transpose(prediction, [2, 0, 1])
            prediction = np.asarray([
                prediction[0], prediction[1],
                np.full((image_height * self.sub_image_expand,
                         image_width * self.sub_image_expand), 0.0)
            ])
            prediction = np.transpose(prediction, [1, 2, 0])
            pass

        if (output_channels == 1):
            encoding = "32FC1"
        else:
            encoding = "32FC3"
            pass

        prediction = prediction.astype('float32')

        result = nbv_3d_cnn_msgs.PredictResult()
        result.scores = bridge.cv2_to_imgmsg(prediction, encoding=encoding)

        self.action_server.set_succeeded(result)

        rospy.loginfo('nbv_3d_cnn_predict: action succeeded.')
        pass
Example #20
 def __init__(self):
     self.bridge = cv_bridge.CvBridge()
     self.image_sub = rospy.Subscriber('/camera/rgb/image_raw', Image,
                                       self.image_callback)
     self.cmd_vel_pub = rospy.Publisher('direction', String, queue_size=1)
Example #21
try:
    import PIL.Image
except ImportError:
    print(
        'Missing library(Pillow)! you can install it using `pip3 install Pillow`'
    )

try:
    import numpy
except ImportError:
    print(
        'Missing library(numpy)! you can install it using `pip3 install numpy`'
    )

import rospy
import cv_bridge

from sensor_msgs.msg import Image

rospy.init_node('issue_cv_bridge_node')

bridge = cv_bridge.CvBridge()
empty_image = PIL.Image.new('RGB', (10, 10))  # create new empty 10*10 image
ros_image = bridge.cv2_to_imgmsg(
    numpy.asarray(empty_image),
    encoding='rgb8')  # convert PIL image to ROS image

rate = rospy.Rate(1)
image_publisher = rospy.Publisher('~image', Image, queue_size=1)

while not rospy.is_shutdown():
    image_publisher.publish(ros_image)
    rate.sleep()
Example #22
 def publicador_a_pantalla(self,texto):
     position = (300, 300)
     fondo_negro = np.full((600,1024, 3), 0, dtype = np.uint8)
     cv2.putText(fondo_negro, texto, position, cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,255,255), 4)
     msg = cv_bridge.CvBridge().cv2_to_imgmsg(fondo_negro)
     self.pub.publish(msg)
Example #23
def process_image(image, depth, net, gaze):
    global old_class_ids, old_conf, old_indices, old_boxes
    width = image.shape[1]
    height = image.shape[0]

    # create input blob
    blob = cv2.dnn.blobFromImage(image,
                                 scale, (416, 416), (0, 0, 0),
                                 True,
                                 crop=False)
    # set input blob for the network
    net.setInput(blob)

    # run inference through the network
    # and gather predictions from output layers
    outs = net.forward(get_output_layers(net))

    # initialization
    class_ids = []
    confidences = []
    boxes = []
    # for each detection from each output layer
    # get the confidence, class id, bounding box params
    # and ignore weak detections (confidence < 0.5)
    for out in outs:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.5:
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)
                x = center_x - w / 2
                y = center_y - h / 2
                class_ids.append(class_id)
                confidences.append(float(confidence))
                boxes.append([x, y, w, h])

    # apply non-max suppression
    indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold,
                               nms_threshold)

    # go through the detections remaining
    # after nms and draw bounding box
    #    ann = cv2.cvtColor(depth, cv2.COLOR_BGRA2BGR)
    for i in old_indices:
        i = i[0]
        box = old_boxes[i]
        x = max(0, box[0])
        y = max(0, box[1])
        w = max(0, box[2])
        h = max(0, box[3])
        if str(classes[old_class_ids[i]]) == "person":
            pub.publish("add not(saw(" + str(classes[old_class_ids[i]]) + "," +
                        str(int(100 * old_conf[i])) + "," +
                        str(int(x + w / 2)) + "," + str(int(y)) + "," +
                        str(int(w)) + "," + str(int(h)) + ")).\n")

    old_indices = indices
    old_boxes = boxes
    old_conf = confidences
    old_class_ids = class_ids

    for i in indices:
        i = i[0]
        box = boxes[i]
        x = max(0, box[0])
        y = max(0, box[1])
        w = max(0, box[2])
        h = max(0, box[3])
        """
        # Trying to do something with depth data, but it seems like gibberish
        # There's an issue open on the pyfreenect2 GitHub
        print(depth.shape)
        d = depth[int((y + h / 2) * 424 / 1080)][int((x + w / 2) * 512 / 1920)]
        print("test")
        print(d)
        sum = [0, 0, 0, 0]
        for j in range(int((x + w / 2) * 512 / 1920), int((x + w / 2) * 512 / 1920) + 10):
            for k in range(int((y + h / 2) * 424 / 1080), int((y + h / 2) * 424 / 1080) + 10):
                sum = np.add(depth[k][j], sum)
        sum = sum / 100
        print(sum)
        print(image.shape)
        print(image[int((y + h / 2))][int((x + w / 2))])
        distance = d[3] / 255
        """

        # Gaze Detection
        #        if str(classes[class_ids[i]]) == "person":
        # Run gaze detection only on the bounding box region of people
        # Somewhat redundant, as the gaze detection does face recognition as well
        # Seems necessary though, as it won't detect multiple people, and it needs the face data to determine direction
        #            gaze.refresh(image[y:y + h, x:x + w])

        # Place green crosses on eyes
        #            image[y:y + h, x:x + w] = gaze.annotated_frame()

        # This is weird
        # Even if pupils_located is a success, the ratio methods subtract 10 from a denominator somewhere
        # So sometimes there's a divide by 0 error
        # This try catch is just to stop the program from dying, but sometimes it still does
        # It's dying when the video feeds freeze and go black and white
        #            if gaze.pupils_located:
        #                try:
        #                    cv2.putText(image, "pupil x: " + str(round(gaze.horizontal_ratio(), 3)) + " y: " + str(
        #                        round(gaze.vertical_ratio(), 3)), (int(x + w - 200), y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
        #                                COLORS[class_ids[i]], 2)
        #                except ZeroDivisionError:
        #                    pass

        # ALMA Communication
        # saw(label, confidence (50-100), center_X, center_Y, width, height, time)
        # Note that time gets added by ALMA because we use obs (see ALMA documentation)
        # Still no depth
        #        pub.publish(
        #            "obs saw(" + str(classes[class_ids[i]]) + "," + str(int(100*confidences[i])) + "," + str(int(x + w/2)) + "," + str(
        #                int(y)) + "," + str(int(w)) + "," + str(int(h)) + ").\n")
        if str(classes[class_ids[i]]) == "person":
            pub.publish("add saw(" + str(classes[class_ids[i]]) + "," +
                        str(int(100 * confidences[i])) + "," +
                        str(int(x + w / 2)) + "," + str(int(y)) + "," +
                        str(int(w)) + "," + str(int(h)) + ").\n")
    #sys.stderr.write("Sent:   obs saw(" + str(classes[class_ids[i]]) + "," + str(int(100*confidences[i])) + "," + str(int(x + w/2)) + "," + str( int(y)) + "," + str(int(w)) + "," + str(int(h)) + ").\n")

        # Draw the bounding box on the color image.
        # The arithmetic in the passed parameters is probably not optimal, but it is only a few additions, so the cost is negligible.
        draw_bounding_box(image, class_ids[i], confidences[i], int(x), int(y),
                          int(x + w), int(y + h))

        # Draws small point at the center of every box on the depth image
        # These points don't seem to match up; depth sensor and color sensor aren't the same aspect ratio or FOV
        # Some kinect drivers have the ability to flatten it out or get real space coordinates


#        draw_bounding_box(ann, class_ids[i], confidences[i], int((x + w / 2) * 512 / 1920),
#                          int((y + h / 2) * 424 / 1080), 5 + int((x + w / 2) * 512 / 1920),
#                          5 + int((y + h / 2) * 424 / 1080))

# display color and depth output on screen
    out_image_name = "color"  # + str(index)
    cv2.imshow(out_image_name, image)
    #    cv2.imshow("depth", ann)

    #Send color output to Baxter screen
    msg = cv_bridge.CvBridge().cv2_to_imgmsg(cv2.resize(image, (1024, 600)),
                                             encoding="bgr8")
    videoPub.publish(msg)
Example #24
# SPDX-License-Identifier: CC0-1.0

from __future__ import absolute_import, division, print_function

import json
import os

import cv2
import cv_bridge
import matplotlib

matplotlib.use('Agg')
import matplotlib.pyplot as plt
import mpld3

imgmsg_to_cv2 = cv_bridge.CvBridge().imgmsg_to_cv2

import marv
from marv_nodes.types_capnp import File
from marv_detail.types_capnp import Section, Widget
from marv_robotics.bag import get_message_type, raw_messages

TOPIC = '/wide_stereo/left/image_rect_throttle'


@marv.node(File)
@marv.input('cam', marv.select(raw_messages, TOPIC))
def image(cam):
    """Extract first image of input stream to jpg file.

    Args:
Example #25
 def __init__(self):
     self.bridge = cv_bridge.CvBridge()
     self.image_sub = rospy.Subscriber('camera/rgb/image_raw', Image,
                                       self.image_callback)
     self.twist = Twist()
Example #26
    def _step(self, action):

        rospy.wait_for_service('/gazebo/unpause_physics')
        try:
            self.unpause()
        except (rospy.ServiceException) as e:
            print ("/gazebo/unpause_physics service call failed")

        if action == 0: #FORWARD
            vel_cmd = Twist()
            vel_cmd.linear.x = 0.1
            vel_cmd.angular.z = 0.0
            self.vel_pub.publish(vel_cmd)
        elif action == 1: #LEFT
            vel_cmd = Twist()
            vel_cmd.linear.x = 0.05
            vel_cmd.angular.z = 0.1
            self.vel_pub.publish(vel_cmd)
        elif action == 2: #RIGHT
            vel_cmd = Twist()
            vel_cmd.linear.x = 0.05
            vel_cmd.angular.z = -0.1
            self.vel_pub.publish(vel_cmd)

        data = None
        img_data=None
        image=None
        while data is None:
            try:
                data = rospy.wait_for_message('/scan', LaserScan, timeout=5)
                img_data = rospy.wait_for_message('/camera/rgb/image_raw', Image, timeout=5)
                #h = image_data.height
                #w = image_data.width
                # turn the Kinect image into a cv2 image
                image = cv_bridge.CvBridge().imgmsg_to_cv2(img_data, desired_encoding='mono8')

                # turn the grayscale image into a binary image (in order to get more contrast)
                (thresh, im_bw) = cv2.threshold(image, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
                cv2.imshow("window",im_bw)
                cv2.waitKey(3)
            except:
                print("NOTHING&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&")

        rospy.wait_for_service('/gazebo/pause_physics')
        try:
            #resp_pause = pause.call()
            self.pause()
        except (rospy.ServiceException) as e:
            print ("/gazebo/pause_physics service call failed")

        state,done = self.discretize_observation(data,5)

        if not done:
            if action == 0:
                reward = 5
            else:
                reward = 1
        else:
            reward = -20

        return state, reward, done, im_bw
Example #27
def rosbag_to_sample(rosbag_fname):
    print('Parsing {0}'.format(rosbag_fname))

    bebop_topics = params['bebop']['topics']

    ### read all messages
    bag = rosbag.Bag(rosbag_fname, 'r')
    msg_dict = collections.defaultdict(list)
    start_time = None
    for topic, msg, t in bag.read_messages():
        if start_time is None:
            start_time = t.to_sec()
        else:
            assert(start_time <= t.to_sec())
        msg_dict[topic].append((t.to_sec() - start_time, msg))
    bag.close()
    # get_measured_vel(rosbag_fname, msg_dict) # necessary hack
    if len(msg_dict[bebop_topics['start_rollout']]) == 0:
        print('\tNo start_rollout, returning None')
        return None
    ### sort by time
    for k, v in msg_dict.items():
        msg_dict[k] = sorted(v, key=lambda x: x[0])
    ### prune start and end
    is_coll = (bebop_topics['collision'] in msg_dict.keys())
    start_time = msg_dict[bebop_topics['start_rollout']][0][0]
    end_time = msg_dict[bebop_topics['collision']][0][0] if is_coll else np.inf
    for topic, time_msg in msg_dict.items():
        msg_dict[topic] = [(t, msg) for t, msg in time_msg if start_time <= t <= end_time]

    ### necessary info for Samples
    dt = params['dt']
    T = params['prediction']['dagger']['T']

    ### space by dt
    image_time_msgs = spaced_messages(msg_dict[bebop_topics['image']], dt)[-T:]
    cmd_vel_time_msgs = aligned_messages(image_time_msgs, msg_dict[bebop_topics['cmd_vel']], dt)[-T:]
    # measured_vel_time_msgs = aligned_messages(image_time_msgs, msg_dict[bebop_topics['measured_vel']], dt)[-T:]

    ### create sample
    sample = Sample(T=len(image_time_msgs), meta_data=params)
    cvb = cv_bridge.CvBridge()
    for t, (image_time_msg, cmd_vel_time_msg) in enumerate(zip(image_time_msgs, cmd_vel_time_msgs)):
        image_msg = image_time_msg[1]
        cmd_vel_msg = cmd_vel_time_msg[1]

        ### image
        im = AgentBebop2d.process_image(image_msg, cvb)
        sample.set_O(im.ravel(), t=t, sub_obs='camera')

        ### collision
        sample.set_O([0], t=t, sub_obs='collision')

        ### linearvel
        sample.set_X([cmd_vel_msg.linear.x, cmd_vel_msg.linear.y], t=t)
        sample.set_U([cmd_vel_msg.linear.x, cmd_vel_msg.linear.y], t=t)
    sample.set_O([int(is_coll)], t=-1, sub_obs='collision')
    # sample.set_O([int(np.random.random() > 0.5)], t=-1, sub_obs='collision') # tmp

    assert(sample.isfinite())

    return sample
Example #28
 def __init__(self, cam, img_topic="/trr_vision/start_finish/image_debug"):
     rospy.loginfo(' publishing image on ({})'.format(img_topic))
     self.image_pub = rospy.Publisher(img_topic,
                                      sensor_msgs.msg.Image,
                                      queue_size=1)
     self.bridge = cv_bridge.CvBridge()
Example #29
    def __init__(self, name, chess_size, dim, approximate=0):
        super().__init__(name)
        self.board = ChessboardInfo()
        self.board.n_cols = chess_size[0]
        self.board.n_rows = chess_size[1]
        self.board.dim = dim

        # make sure n_cols is not smaller than n_rows, otherwise error computation will be off
        if self.board.n_cols < self.board.n_rows:
            self.board.n_cols, self.board.n_rows = self.board.n_rows, self.board.n_cols

        image_topic = "monocular/image_rect"
        camera_topic = "monocular/camera_info"

        tosync_mono = [
            (image_topic, sensor_msgs.msg.Image),
            (camera_topic, sensor_msgs.msg.CameraInfo),
        ]

        if approximate <= 0:
            sync = message_filters.TimeSynchronizer
        else:
            sync = functools.partial(ApproximateTimeSynchronizer,
                                     slop=approximate)

        tsm = sync([
            message_filters.Subscriber(self, type, topic)
            for (topic, type) in tosync_mono
        ], 10)
        tsm.registerCallback(self.queue_monocular)

        left_topic = "stereo/left/image_rect"
        left_camera_topic = "stereo/left/camera_info"
        right_topic = "stereo/right/image_rect"
        right_camera_topic = "stereo/right/camera_info"

        tosync_stereo = [(left_topic, sensor_msgs.msg.Image),
                         (left_camera_topic, sensor_msgs.msg.CameraInfo),
                         (right_topic, sensor_msgs.msg.Image),
                         (right_camera_topic, sensor_msgs.msg.CameraInfo)]

        tss = sync([
            message_filters.Subscriber(self, type, topic)
            for (topic, type) in tosync_stereo
        ], 10)
        tss.registerCallback(self.queue_stereo)

        self.br = cv_bridge.CvBridge()

        self.q_mono = Queue()
        self.q_stereo = Queue()

        mth = ConsumerThread(self.q_mono, self.handle_monocular)
        mth.setDaemon(True)
        mth.start()

        sth = ConsumerThread(self.q_stereo, self.handle_stereo)
        sth.setDaemon(True)
        sth.start()

        self.mc = MonoCalibrator([self.board])
        self.sc = StereoCalibrator([self.board])
Example #30
 def __init__(self):
     super().__init__("getCamDataNode")
     self.bridge = cv_bridge.CvBridge()
     self.image_sub = self.create_subscription(Image, "/camera/image_raw", self.handle_camera_data, 10)
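
A minimal sketch (not part of the original snippet) of the handle_camera_data callback the subscription above points to, assuming it only converts and logs the incoming frame:

 def handle_camera_data(self, msg):
     # convert the ROS 2 Image message to an OpenCV BGR array
     frame = self.bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
     self.get_logger().info("received frame of shape {}".format(frame.shape))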