def getImage(self, idx):
    """Return the (timestamp, grayscale uint8 image) pair for message *idx*.

    Reads the bag message at the stored index position, converts its ROS
    header stamp to an acv.Time (optionally corrected onto the local clock),
    decodes the image payload to a single-channel array and returns both.

    Raises:
        RuntimeError: if the image encoding is not one of the supported ones.
    """
    topic, data, stamp = self.bag._read_message(self.index[idx].position)
    if self.perform_synchronization:
        # Map the device timestamp onto the local clock.
        timestamp = acv.Time(
            self.timestamp_corrector.getLocalTime(
                data.header.stamp.to_sec()))
    else:
        timestamp = acv.Time(data.header.stamp.secs,
                             data.header.stamp.nsecs)
    if data._type == 'mv_cameras/ImageSnappyMsg':
        # Lazily import snappy only when such messages actually occur.
        if self.uncompress is None:
            from snappy import uncompress
            self.uncompress = uncompress
        # np.frombuffer replaces the deprecated np.fromstring (removed in
        # NumPy 2.0); both yield the same uint8 view of the payload.
        img_data = np.reshape(self.uncompress(
            np.frombuffer(data.data, dtype='uint8')),
                              (data.height, data.width),
                              order="C")
    elif data.encoding == "16UC1" or data.encoding == "mono16":
        image_16u = np.array(self.CVB.imgmsg_to_cv2(data))
        # Scale 16-bit depth down to 8 bits.
        img_data = (image_16u / 256).astype("uint8")
    elif data.encoding == "8UC1" or data.encoding == "mono8":
        img_data = np.array(self.CVB.imgmsg_to_cv2(data))
    elif data.encoding == "rgb8":
        # Bug fix: imgmsg_to_cv2 without a desired encoding keeps the source
        # channel order, so rgb8 data is RGB in memory and must use
        # COLOR_RGB2GRAY (BGR2GRAY would swap the red/blue weights).
        img_data = np.array(self.CVB.imgmsg_to_cv2(data))
        img_data = cv2.cvtColor(img_data, cv2.COLOR_RGB2GRAY)
    elif data.encoding == "8UC3":
        # Generic 3-channel data: assume OpenCV's default BGR order.
        img_data = np.array(self.CVB.imgmsg_to_cv2(data))
        img_data = cv2.cvtColor(img_data, cv2.COLOR_BGR2GRAY)
    elif data.encoding == "8UC4" or data.encoding == "bgra8":
        img_data = np.array(self.CVB.imgmsg_to_cv2(data))
        img_data = cv2.cvtColor(img_data, cv2.COLOR_BGRA2GRAY)
    else:
        raise RuntimeError(
            "Unsupported Image format '{}' (Supported are: 16UC1 / mono16, 8UC1 / mono8, 8UC3 / rgb8, 8UC4 / bgra8 and ImageSnappyMsg)"
            .format(data.encoding))
    return (timestamp, img_data)
 def getMessage(self, idx):
     """Return (timestamp, angular velocity, linear acceleration) for IMU message *idx*."""
     topic, data, stamp = self.bag._read_message(self.index[idx].position)
     if self.perform_synchronization:
         # Translate the device timestamp into local time.
         device_time = data.header.stamp.to_sec()
         timestamp = acv.Time(self.timestamp_corrector.getLocalTime(device_time))
     else:
         timestamp = acv.Time(data.header.stamp.secs, data.header.stamp.nsecs)
     gyro = data.angular_velocity
     accel = data.linear_acceleration
     omega = np.array([gyro.x, gyro.y, gyro.z])
     alpha = np.array([accel.x, accel.y, accel.z])
     return (timestamp, omega, alpha)
Example #3
0
 def getData(self, idx):
     """Return the (timestamp, parsed payload) pair for message *idx*."""
     topic, data, stamp = self.bag._read_message(self.index[idx].position)
     if self.perform_synchronization:
         # Correct the device time onto the local clock.
         corrected = self.timestamp_corrector.getLocalTime(data.header.stamp.to_sec())
         timestamp = acv.Time(corrected)
     else:
         timestamp = acv.Time(data.header.stamp.secs, data.header.stamp.nsecs)
     return timestamp, self.parser.parseData(data)
Example #4
0
    def loadImuData(self):
        print "Reading IMU data ({0})".format(self.dataset.topic)

        # prepare progess bar
        iProgress = sm.Progress2(self.dataset.numMessages())
        iProgress.sample()

        Rgyro = np.eye(
            3) * self.gyroUncertaintyDiscrete * self.gyroUncertaintyDiscrete
        Raccel = np.eye(
            3) * self.accelUncertaintyDiscrete * self.accelUncertaintyDiscrete

        # Now read the imu measurements.
        imu = []
        for timestamp, omega, alpha in self.dataset:
            timestamp = acv.Time(timestamp.toSec())
            imu.append(
                self.ImuMeasurement(timestamp, omega, alpha, Rgyro, Raccel))
            iProgress.sample()

        self.imuData = imu

        if len(self.imuData) > 1:
            print "\r  Read %d imu readings over %.1f seconds                   " % (
                len(imu), imu[-1].stamp.toSec() - imu[0].stamp.toSec())
        else:
            sm.logFatal(
                "Could not find any IMU messages. Please check the dataset.")
            sys.exit(-1)
Example #5
0
    def findCameraTimespan(self):
        """Find the overall observation timespan across all cameras.

        Scans each camera's target observations and stores the combined
        start/end times in self.timeStart and self.timeEnd.
        """
        tStart = acv.Time(0.0)
        tEnd = acv.Time(0.0)

        for cam in self.camList:
            if len(cam.targetObservations) > 0:
                tStartCam = cam.targetObservations[0].time()
                tEndCam = cam.targetObservations[-1].time()

                # NOTE(review): with tStart initialised to 0.0 this condition
                # only fires for negative timestamps, so timeStart typically
                # remains 0 — kept as-is to preserve existing behavior.
                if tStart.toSec() > tStartCam.toSec():
                    tStart = tStartCam

                if tEndCam.toSec() > tEnd.toSec():
                    tEnd = tEndCam

        self.timeStart = tStart
        # Bug fix: the original assigned tEnd to self.timeStart, which both
        # discarded the computed start time and left self.timeEnd unset.
        self.timeEnd = tEnd
 def getImage(self, idx):
     """Return the (timestamp, image array) pair for message *idx*.

     Snappy-compressed mv_cameras images are uncompressed manually; all
     other messages are converted through cv_bridge unchanged.
     """
     topic, data, stamp = self.bag._read_message(self.index[idx].position)
     ts = acv.Time(data.header.stamp.secs, data.header.stamp.nsecs)
     if data._type == 'mv_cameras/ImageSnappyMsg':
         # Lazily import snappy only when such messages actually occur.
         if self.uncompress is None:
             from snappy import uncompress
             self.uncompress = uncompress
         # np.frombuffer replaces the deprecated np.fromstring
         # (removed in NumPy 2.0).
         img_data = np.reshape(self.uncompress(np.frombuffer(data.data, dtype='uint8')),
                               (data.height, data.width), order="C")
     else:
         img_data = np.array(self.CVB.imgmsg_to_cv2(data))
     return (ts, img_data)
Example #7
0
    def getImage(self, idx):
        """Return the (timestamp, grayscale image) pair for message *idx*.

        Reads the bag message at the stored index position, converts the ROS
        stamp to an acv.Time (optionally corrected onto the local clock),
        decodes the payload and collapses 3-channel images to grayscale.
        """
        topic, data, stamp = self.bag._read_message(self.index[idx].position)
        if self.perform_synchronization:
            # Map the device timestamp onto the local clock.
            timestamp = acv.Time(
                self.timestamp_corrector.getLocalTime(
                    data.header.stamp.to_sec()))
        else:
            timestamp = acv.Time(data.header.stamp.secs,
                                 data.header.stamp.nsecs)
        if data._type == 'mv_cameras/ImageSnappyMsg':
            # Lazily import snappy only when such messages actually occur.
            if self.uncompress is None:
                from snappy import uncompress
                self.uncompress = uncompress
            # np.frombuffer replaces the deprecated np.fromstring
            # (removed in NumPy 2.0).
            img_data = np.reshape(
                self.uncompress(np.frombuffer(data.data, dtype='uint8')),
                (data.height, data.width), order="C")
        else:
            img_data = np.array(self.CVB.imgmsg_to_cv2(data))

        # Collapse color images to a single grayscale channel.
        if img_data.ndim == 3:
            img_data = cv2.cvtColor(img_data, cv2.COLOR_BGR2GRAY)

        return (timestamp, img_data)
Example #8
0
    def getMessage(self, idx):
        """Return (timestamp, angular velocity, linear acceleration) for IMU message *idx*."""
        topic, data, stamp = self.bag._read_message(self.index[idx].position)

        timestamp = acv.Time(data.header.stamp.secs, data.header.stamp.nsecs)

        gyro = data.angular_velocity
        accel = data.linear_acceleration
        omega = np.array([gyro.x, gyro.y, gyro.z])
        alpha = np.array([accel.x, accel.y, accel.z])

        return (timestamp, omega, alpha)
Example #9
0
 def getImage(self, idx):
     """Return the (timestamp, grayscale uint8 image) pair for message *idx*.

     Supports Snappy-compressed mv_cameras images, sensor_msgs/CompressedImage
     and sensor_msgs/Image with mono, color and Bayer encodings; everything is
     converted to a single grayscale channel.

     Raises:
         RuntimeError: for an unsupported message type or image encoding.
     """
     topic, data, stamp = self.bag._read_message(self.index[idx].position)
     if self.perform_synchronization:
         # Map the device timestamp onto the local clock.
         timestamp = acv.Time(
             self.timestamp_corrector.getLocalTime(
                 data.header.stamp.to_sec()))
     else:
         timestamp = acv.Time(data.header.stamp.secs,
                              data.header.stamp.nsecs)
     if data._type == 'mv_cameras/ImageSnappyMsg':
         # Lazily import snappy only when such messages actually occur.
         if self.uncompress is None:
             from snappy import uncompress
             self.uncompress = uncompress
         # np.frombuffer replaces the deprecated np.fromstring (removed in
         # NumPy 2.0); both yield the same uint8 view of the payload.
         img_data = np.reshape(self.uncompress(
             np.frombuffer(data.data, dtype='uint8')),
                               (data.height, data.width),
                               order="C")
     elif data._type == 'sensor_msgs/CompressedImage':
         # compressed images only have either mono or BGR normally (png and jpeg)
         # https://github.com/ros-perception/vision_opencv/blob/906d326c146bd1c6fbccc4cd1268253890ac6e1c/cv_bridge/src/cv_bridge.cpp#L480-L506
         img_data = np.array(self.CVB.compressed_imgmsg_to_cv2(data))
         if len(img_data.shape) > 2 and img_data.shape[2] == 3:
             img_data = cv2.cvtColor(img_data, cv2.COLOR_BGR2GRAY)
     elif data._type == 'sensor_msgs/Image':
         if data.encoding == "16UC1" or data.encoding == "mono16":
             image_16u = np.array(self.CVB.imgmsg_to_cv2(data))
             # Scale 16-bit depth down to 8 bits.
             img_data = (image_16u / 256).astype("uint8")
         elif data.encoding == "8UC1" or data.encoding == "mono8":
             img_data = np.array(self.CVB.imgmsg_to_cv2(data))
         elif data.encoding == "8UC3" or data.encoding == "bgr8":
             img_data = np.array(self.CVB.imgmsg_to_cv2(data))
             img_data = cv2.cvtColor(img_data, cv2.COLOR_BGR2GRAY)
         elif data.encoding == "rgb8":
             # imgmsg_to_cv2 keeps the source channel order, hence RGB2GRAY.
             img_data = np.array(self.CVB.imgmsg_to_cv2(data))
             img_data = cv2.cvtColor(img_data, cv2.COLOR_RGB2GRAY)
         elif data.encoding == "8UC4" or data.encoding == "bgra8":
             img_data = np.array(self.CVB.imgmsg_to_cv2(data))
             img_data = cv2.cvtColor(img_data, cv2.COLOR_BGRA2GRAY)
         # bayes encodings conversions from
         # https://github.com/ros-perception/image_pipeline/blob/6caf51bd4484ae846cd8a199f7a6a4b060c6373a/image_proc/src/libimage_proc/processor.cpp#L70
         elif data.encoding == "bayer_rggb8":
             img_data = np.array(self.CVB.imgmsg_to_cv2(data))
             img_data = cv2.cvtColor(img_data, cv2.COLOR_BAYER_BG2GRAY)
         elif data.encoding == "bayer_bggr8":
             img_data = np.array(self.CVB.imgmsg_to_cv2(data))
             img_data = cv2.cvtColor(img_data, cv2.COLOR_BAYER_RG2GRAY)
         elif data.encoding == "bayer_gbrg8":
             img_data = np.array(self.CVB.imgmsg_to_cv2(data))
             img_data = cv2.cvtColor(img_data, cv2.COLOR_BAYER_GR2GRAY)
         elif data.encoding == "bayer_grbg8":
             img_data = np.array(self.CVB.imgmsg_to_cv2(data))
             img_data = cv2.cvtColor(img_data, cv2.COLOR_BAYER_GB2GRAY)
         else:
             raise RuntimeError(
                 "Unsupported Image Encoding: '{}'\nSupported are: "
                 "16UC1 / mono16, 8UC1 / mono8, 8UC3 / rgb8 / bgr8, 8UC4 / bgra8, "
                 "bayer_rggb8, bayer_bggr8, bayer_gbrg8, bayer_grbg8".
                 format(data.encoding))
     else:
         raise RuntimeError(
             "Unsupported Image Type: '{}'\nSupported are: "
             "mv_cameras/ImageSnappyMsg, sensor_msgs/CompressedImage, sensor_msgs/Image"
             .format(data._type))
     return (timestamp, img_data)
    def extract(self):
        """Run target detection on every camera image and write per-image data files.

        For each image in self.cam_path (sorted in natural order) the target
        detector is run; on success a "<n>.txt" file is written into
        <cam_path>/points_data containing the target grid points with their
        image projections, the camera-from-target transform matrix and the
        matching IMU/gimbal angles.
        """
        # Load image paths
        print("Loading camera images from: " + self.cam_path)
        cam_files = [
            f for f in os.listdir(self.cam_path)
            if os.path.isfile(os.path.join(self.cam_path, f))
        ]
        cam_files = sorted(cam_files, key=natural_keys)

        file_count = 0
        filename_num = file_count + 1
        for f in cam_files:
            image_path = os.path.join(self.cam_path, f)
            image = cv2.imread(image_path, 0)  # flag 0: read as grayscale
            np_image = np.array(image)

            # Detect target for camera; the timestamp is irrelevant here.
            timestamp = acv.Time(0, 0)
            success, observation = self.mono_validator.target.detector.findTarget(
                timestamp, np_image)
            observation.clearImage()

            if success:
                # Display progress
                print("Processing image" + ' ' + str(filename_num))

                # Get the imu angles recorded for this frame
                curr_imu_angle = self.imu_angles[file_count, 0:]
                [image_corners, reproj_corners,
                 meanReprojectionError] = self.mono_validator.generateMonoview(
                     np_image, observation, success)
                target_corners = observation.getCornersTargetFrame()
                T_t_c = observation.T_t_c()
                # Invert target-from-camera to get camera-from-target.
                T_c_t = np.linalg.inv(T_t_c.T())

                # Create output directory (os.path.join instead of string
                # concatenation keeps the path portable)
                output_dir = os.path.join(self.cam_path, "points_data")
                create_directory(output_dir)

                # Write the per-image data file; 'with' guarantees the handle
                # is closed even if a write fails (the original leaked it on
                # error).
                output_path = os.path.join(output_dir, str(filename_num) + ".txt")
                with open(output_path, "w") as outfile:
                    # -- Output Gridpoints
                    outfile.write("gridpoints:\n")
                    for corner, pixel in zip(target_corners, image_corners):
                        line = "{:.5f}".format(corner[0]) + " "
                        line += "{:.5f}".format(corner[1]) + " "
                        line += "{:.5f}".format(corner[2]) + " "
                        line += "{:.5f}".format(pixel[0]) + " "
                        line += "{:.5f}".format(pixel[1]) + "\n"
                        outfile.write(line)
                    # -- Output Transform
                    outfile.write("tmatrix:\n")
                    tmatrix = np.array(T_c_t)
                    tmatrix_str = '\n'.join(" ".join('%0.5f' % (x) for x in y)
                                            for y in tmatrix)
                    outfile.write(tmatrix_str)
                    # -- Output Gimbal angles
                    outfile.write("\ngimbalangles:\n")
                    for angle in curr_imu_angle:
                        outfile.write(str(angle) + ' ')
                    outfile.write("\n")

                    # outfile.write("reprojectionerror:\n")
                    # outfile.write(str(np.mean(meanReprojectionError)))

                    # Finish output
                    outfile.write("end:")

            file_count += 1
            filename_num += 1