Code example #1
    def test_car_length(self):
        cc = CameraCalibration()

        X1, Y1 = cc.image2world(661.498, 465.096)
        X2, Y2 = cc.image2world(799.19, 617.622)

        length = np.sqrt((X2 - X1)**2 + (Y2 - Y1)**2)

        real_length = 11  # ground-truth vehicle length, in metres

        assert np.abs(length - real_length) < 4
Code example #2
    def test_car_width(self):
        cc = CameraCalibration()

        X1, Y1 = cc.image2world(914.768, 603.941)
        X2, Y2 = cc.image2world(799.19, 617.622)

        width = np.sqrt((X2 - X1)**2 + (Y2 - Y1)**2)

        real_width = 2.55  # ground-truth vehicle width, in metres

        assert np.abs(width - real_width) < 0.2
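Both tests above follow the same pattern: project two image points into world coordinates with CameraCalibration.image2world, then compare their Euclidean distance against a known dimension in metres. A minimal sketch of that pattern as a helper; world_distance is a hypothetical name, not part of the project:

import numpy as np

def world_distance(cc, uv1, uv2):
    # Project both pixel coordinates into the world frame, then measure
    # their separation (metres, assuming image2world returns metres).
    X1, Y1 = cc.image2world(*uv1)
    X2, Y2 = cc.image2world(*uv2)
    return np.sqrt((X2 - X1) ** 2 + (Y2 - Y1) ** 2)

# e.g. the car-length assertion above becomes:
# assert abs(world_distance(cc, (661.498, 465.096), (799.19, 617.622)) - 11) < 4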
Code example #3
    def calibrate(self):
        """
        Compute the camera matrix 
        """
        self.calib = CameraCalibration(self.nColumns, self.nRows)
        self.calib.calibrate(self.srcFolder)
        self.params.calib = self.calib

        self.nFrames = len(self.calib.srcImgs)

        self.stream = ImageListVideoStream(self.calib.srcImgs)
        self._setDisplay()
        self._setDisplayMax()

        self._updateImgProvider()
Code example #4
def main():
    import cv2
    import matplotlib.pyplot as plt
    from glob import glob
    from camera_calibration import CameraCalibration
    from thresholding import threshold
    from transform import PerspectiveTransform
    # find_lines_with_sliding_windows / plot_sliding_windows come from the surrounding file

    # Fixed image dimensions
    height = 720
    width = 1280

    # Prepare camera calibration
    print("Calibrating camera")
    calibration = CameraCalibration.default()

    # Prepare perspective transform
    transform = PerspectiveTransform.default(height, width)

    images = glob('test_images/straight*') + glob('test_images/test*')

    for fname in images:
        print("Processing", fname)

        # Run pipeline
        img = cv2.imread(fname)
        img = calibration.undistort(img)
        img, _ = threshold(img)
        # img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, (5,5))
        img = transform.transform(img)

        # Find lines using sliding windows
        left_lane, right_lane = find_lines_with_sliding_windows(img)

        # Plot sliding windows
        plot_sliding_windows(img, left_lane, right_lane)

        # combined_binary, color_binary = threshold(img, stack=True)
        #
        # f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(32, 9))
        # ax1.set_title('Stacked thresholds', fontsize=20)
        # ax1.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        #
        # ax2.set_title('Result', fontsize=20)
        # ax2.imshow(combined_binary, cmap='gray')

        plt.savefig('output_images/sliding_windows_' + fname.split('/')[-1])
Code example #5
def main():
    import cv2
    import numpy as np
    import matplotlib.pyplot as plt
    from glob import glob
    from camera_calibration import CameraCalibration
    from transform import PerspectiveTransform

    # Fixed image dimensions
    height = 720
    width = 1280
    transform = PerspectiveTransform.default(height, width)
    calibration = CameraCalibration.default()

    images = glob('test_images/straight*') + glob('test_images/test*')

    for fname in images:
        print("Processing", fname)
        image = cv2.cvtColor(cv2.imread(fname), cv2.COLOR_BGR2RGB)
        image = calibration.undistort(image)

        polygonned = cv2.polylines(np.copy(image), [np.int32(transform.src)],
                                   False,
                                   color=255,
                                   thickness=1)

        transformed = transform.transform(np.copy(polygonned))

        inversed = transform.invert().transform(np.copy(transformed))

        f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(32, 9))
        ax1.set_title('Original', fontsize=20)
        ax1.imshow(image)

        ax2.set_title('Area of interest', fontsize=20)
        ax2.imshow(polygonned)

        ax3.set_title('Transformed', fontsize=20)
        ax3.imshow(transformed)

        ax4.set_title('Transformed inversed', fontsize=20)
        ax4.imshow(inversed)

        plt.savefig('output_images/transform_' + fname.split('/')[-1])
Code example #6
def main():
    # Assumed module-level imports from the full file: cv2, numpy as np,
    # glob, CameraCalibration, PerspectiveTransform, threshold and Lane.
    # Fixed image dimensions
    height = 720
    width = 1280

    # Prepare camera calibration
    print("Calibrating camera")
    calibration = CameraCalibration.default()
    transform = PerspectiveTransform.default(height, width)

    images = glob('test_images/straight*') + glob('test_images/test*')

    for fname in images:
        print("Processing", fname)
        org_image = cv2.imread(fname)
        if org_image.shape != (height, width, 3):
            print("skipping image", fname, "invalid dimensions", org_image.shape)
            continue

        # Run the pipeline on a copy of the image
        undistorted = calibration.undistort(np.copy(org_image))
        transformed = transform.transform(undistorted)
        warped_binary, _ = threshold(transformed, stack=False)
        # combined, _ = threshold(undistorted, stack=False)
        # warped_binary = transform.transform(combined)

        lane = Lane()
        lane.update(warped_binary)
        final = lane.overlay(org_image, draw_lines=False, fill_lane=True, transform=transform.invert())
        final = lane.overlay_text(final)
        final = cv2.polylines(final, [np.int32(transform.src)], color=[255, 0, 0], isClosed=False)

        cv2.imwrite('output_images/lane_{}'.format(fname.split('/')[-1]), final)

        final = lane.overlay(np.dstack((warped_binary, warped_binary, warped_binary)) * 255, draw_lines=True,
                             fill_lane=False)
        cv2.imwrite('output_images/lane_warped_{}'.format(fname.split('/')[-1]), final)
Code example #7
class CalibrationIface(PlayerInterface):
    """
    Implements the PlayerInterface class with an ImageListVideoStream
    It uses the CameraCalibration class to compute the camera matrix from a set of images containing a
    chessboard pattern.
    """
    def __init__(self, app, context, parent, params, displayName, providerName, timerSpeed=200):
        PlayerInterface.__init__(self, app, context, parent, params, displayName, providerName, timerSpeed)
        
        self.nColumns = 9
        self.nRows = 6
        self.matrixType = 'normal'
        
        self._setDisplay()
        self._setDisplay()

    @pyqtSlot()
    def calibrate(self):
        """
        Compute the camera matrix 
        """
        self.calib = CameraCalibration(self.nColumns, self.nRows)
        self.calib.calibrate(self.srcFolder)
        self.params.calib = self.calib
        
        self.nFrames = len(self.calib.srcImgs)
        
        self.stream = ImageListVideoStream(self.calib.srcImgs)
        self._setDisplay()
        self._setDisplayMax()
        
        self._updateImgProvider()

    @pyqtSlot(result=QVariant)
    def getNRows(self):
        """
        Get the number of inner rows in the chessboard pattern
        """
        return self.nRows

    @pyqtSlot(QVariant)
    def setNRows(self, nRows):
        """
        Set the number of inner rows in the chessboard pattern
        """
        self.nRows = int(nRows)

    @pyqtSlot(result=QVariant)
    def getNColumns(self):
        """
        Get the number of inner columns in the chessboard pattern
        """
        return self.nColumns

    @pyqtSlot(QVariant)
    def setNColumns(self, nColumns):
        """
        Set the number of inner columns in the chessboard pattern
        """
        self.nColumns = int(nColumns)

    @pyqtSlot(result=QVariant)
    def getFolderPath(self):
        """
        Get the path to the folder where the images with the pattern are stored
        """
        diag = QFileDialog()
        srcFolder = diag.getExistingDirectory(parent=diag, caption="Choose directory",
                                              directory=os.getenv('HOME'))
        self.srcFolder = srcFolder
        return srcFolder

    @pyqtSlot(QVariant)
    def setMatrixType(self, matrixType):
        """
        Set the matrix type to be saved. Resolution independent (normal) or dependent (optimized)
        
        :param string matrixType: The type of matrix to be saved. One of ['normal', 'optimized']
        """
        matrixType = matrixType.lower()
        if matrixType not in ['normal', 'optimized']:
            raise KeyError("Expected one of ['normal', 'optimized'], got {}".format(matrixType))
        else:
            self.matrixType = matrixType

    @pyqtSlot()
    def saveCameraMatrix(self):
        """
        Save the camera matrix selected as self.matrixType
        """
        diag = QFileDialog()
        destPath = diag.getSaveFileName(parent=diag,
                                    caption='Save matrix',
                                    directory=os.getenv('HOME'), 
                                    filter='Numpy (.npy)')
        destPath = destPath[0]
        if destPath:
            if self.matrixType == 'normal':
                np.save(destPath, self.cameraMatrix)
            elif self.matrixType == 'optimized':
                np.save(destPath, self.optimalCameraMatrix)

    @pyqtSlot(QVariant)
    def setFrameType(self, frameType):
        """
        Select the type of frame to be displayed (before, during, or after distortion correction)
        
        :param string frameType: The selected frame type. One of ['source', 'detected', 'corrected']
        """
        frameType = frameType.lower()
        currentIndex = self.stream.currentFrameIdx
        if frameType == "source":
            imgs = self.calib.srcImgs
        elif frameType == "detected":
            imgs = self.calib.detectedImgs
        elif frameType == "corrected":
            imgs = self.calib.correctedImgs
        else:
            raise KeyError("Expected one of ['source', 'detected', 'corrected'], got {}".format(frameType))
        self.stream = ImageListVideoStream(imgs)
        self._updateImgProvider()
        self.stream.currentFrameIdx = self._validateFrameIdx(currentIndex - 1)  # reset to previous position
        self.getImg()
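The docstring above says CameraCalibration computes the camera matrix from chessboard images, but that class itself is not shown. For illustration only, a sketch of what such a chessboard calibration typically looks like in OpenCV; the function name, file glob, and folder layout are assumptions, and the 'optimized' matrix in setMatrixType plausibly maps to cv2.getOptimalNewCameraMatrix:

import glob
import os
import cv2
import numpy as np

def calibrate_chessboard(src_folder, n_columns=9, n_rows=6):
    # Object points for one view of the pattern: (0,0,0), (1,0,0), ...
    objp = np.zeros((n_rows * n_columns, 3), np.float32)
    objp[:, :2] = np.mgrid[0:n_columns, 0:n_rows].T.reshape(-1, 2)

    obj_points, img_points, shape = [], [], None
    for path in glob.glob(os.path.join(src_folder, '*.jpg')):
        gray = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
        shape = gray.shape[::-1]
        found, corners = cv2.findChessboardCorners(gray, (n_columns, n_rows), None)
        if found:
            obj_points.append(objp)
            img_points.append(corners)

    # Camera matrix and distortion coefficients (the 'normal' matrix)
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        obj_points, img_points, shape, None, None)
    # Resolution-dependent refinement (the 'optimized' matrix)
    optimal_mtx, _ = cv2.getOptimalNewCameraMatrix(mtx, dist, shape, 1, shape)
    return mtx, optimal_mtx, dist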
Code example #8
import cv2
from camera_calibration import CameraCalibration
import image_processing as imgproc
import numpy as np
from line import Line

scaled_size = 1
image_size = (int(1280 * scaled_size), int(720 * scaled_size))
offset = image_size[1] * 0.3

file_name = './output_images/undist_straight_lines1.jpg'

calibration = CameraCalibration('camera_cal/')
calibration.calibrate()
intrinsic_mat = calibration.get_intrinsic()
dist_paras = calibration.get_distortion_paras()

perspective_src_points = scaled_size * np.float32(
    [[233, 694], [595, 450], [686, 450], [1073, 694]])  # These points are manually selected
perspective_dst_points = np.float32([[offset, image_size[1]], [offset, 0],
                                     [image_size[0] - offset, 0], [image_size[0] - offset, image_size[1]]])

#######################
img = cv2.imread(file_name)
undist = calibration.distort_correction(img)
undist = cv2.resize(undist, (0, 0), fx=scaled_size, fy=scaled_size)

hsv = cv2.cvtColor(undist, cv2.COLOR_BGR2HSV)  # HSV conversion (the original variable was misleadingly named hls)
h = hsv[:, :, 0]
s = hsv[:, :, 1]
v = hsv[:, :, 2]
Code example #9
 def create_camera_calibration():
     return CameraCalibration.default()
Code example #10
File: calculate_speed.py  Project: maoxin/lane
import ast
from os.path import join, dirname, realpath

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from camera_calibration import CameraCalibration  # assumed module path


class CalculateSpeed(object):
    def __init__(self, filename=join(dirname(dirname(realpath(__file__))), "data/cctv_ftg1_SPEED_000000_carLoc.csv"),
                 top_obs_rec=906, bottom_obs_rec=439,
                 window_size=10, start_time_stamp='2018-01-01T08:00'):
        self.start_time_stamp = start_time_stamp

        self.start_time = []
        self.car_type = []
        self.track_coord = []
        with open(filename, 'r') as f:
            for line in f:
                dt = line.strip().split("; ")

                self.start_time.append(float(dt[0]) * 0.1 / 3)
                self.car_type.append(dt[1])
                self.track_coord.append([ast.literal_eval(x) for x in dt[2:]])  # safer than eval for "(u, v)" strings
                
        self.start_time = np.array(self.start_time)
        self.car_type = np.array(self.car_type)

        self.top_obs_rec = top_obs_rec
        self.bottom_obs_rec = bottom_obs_rec

        self.window_size = window_size
        
        if pd.to_datetime(self.start_time_stamp) < pd.to_datetime('2018-09-11T16:00'):
            self.cc = CameraCalibration(default=False)
            self.scale = 1
            self.top_obs_rec = 931.761
            self.bottom_obs_rec = 439.723
        else:
            self.cc = CameraCalibration(
                p1=(787.919, 640.099), p2=(594.782, 404.630), 
                p3=(675.065, 399.513), p4=(913.843, 619.450),
                u2=703.475, u4=823.241, default=False
            )
            self.scale = 31.05945702896952 / 51.07394876776243
            self.top_obs_rec = 793.968
            self.bottom_obs_rec = 337.152

    def coord_image2word(self):
        self.Us = []
        self.Vs = []
        self.times = []
        for i, track in enumerate(self.track_coord):
            dt = np.dtype('float,float')
            track = np.array(track, dtype=dt)

            U = track['f0']
            V = track['f1']
            time = self.start_time[i] + np.arange(U.shape[0]) * 0.1

            idx = ((V >= self.bottom_obs_rec) & (V <= self.top_obs_rec))
            U = U[idx]
            V = V[idx]
            time = time[idx]

            self.Us.append(U)
            self.Vs.append(V)
            self.times.append(time)

        self.Xs = []
        self.Ys = []
        for U, V in zip(self.Us, self.Vs):
            X, Y = self.cc.image2world(U, V)
            self.Xs.append(X)
            self.Ys.append(Y)

        return self.Xs, self.Ys, self.times
    
    def get_speed(self):
        Xs, Ys, times = self.coord_image2word()
        self.speeds = []
        for X, Y, time in zip(Xs, Ys, times):
            # self.speeds.append(
                # ((np.sqrt(
                    # (X[self.window_size:] - X[:-self.window_size])**2 +
                    # (Y[self.window_size:] - Y[:-self.window_size])**2
                # ) / (time[self.window_size:] - time[:-self.window_size]) * 3.6)).mean()
            # )
            if len(time) > 1:
                self.speeds.append(np.sqrt((X[-1] - X[0])**2 + (Y[-1] - Y[0])**2) / (time[-1] - time[0]) * 3.6 * self.scale)
            # m/s -> km/h

        return self.speeds

    def get_df(self, save=False):
        df = pd.DataFrame(columns=['time', 'speed', 'car_type'])
        
        # all_speeds = np.hstack(self.speeds)
        all_speeds = self.speeds
        # all_times = [(tm[self.window_size:] + tm[:-self.window_size]) / 2 for tm in self.times]
        all_times = [(tm[-1] + tm[0]) / 2 for tm in self.times if len(tm) > 1]
        all_times = np.hstack(all_times).astype('timedelta64[s]') + np.datetime64(self.start_time_stamp)

        df.speed = all_speeds
        df.time = all_times
        i = 0
        for j in range(len(self.times)):
            # df.loc[i: i+self.speeds[j].shape[0], 'car_type'] = self.car_type[j]
            # i = i + self.speeds[j].shape[0]
            if len(self.times[j]) > 1:
                df.loc[i, 'car_type'] = self.car_type[j]
                i += 1

        if save:
            df.to_csv(join(dirname(dirname(realpath(__file__))), f'result/speeds_{self.start_time_stamp}.csv'), index=False)

        return df


    def draw_result(self):
        for i, (car_type, speed, time) in enumerate(zip(self.car_type, self.speeds, self.times)):
            plt.close()
            plt.plot((time[self.window_size:] + time[:-self.window_size]) / 2, speed, 'o-')
            plt.title(f"{car_type} speed")
            plt.xlabel('time (s)')
            plt.ylabel('speed (km/h)')
            plt.savefig(join(dirname(dirname(realpath(__file__))), f'result/speed/{i}.pdf'))
            plt.close()

        for i, (car_type, X, Y) in enumerate(zip(self.car_type, self.Xs, self.Ys)):
            plt.close()
            plt.plot(X, Y)
            plt.title(f"{car_type} track")
            plt.xlabel('X (m)')
            plt.ylabel('Y (m)')
            plt.savefig(join(dirname(dirname(realpath(__file__))), f'result/track/{i}.pdf'))
            plt.close()

        for i, (car_type, u, v) in enumerate(zip(self.car_type, self.Us, self.Vs)):
            plt.close()
            plt.plot(u, v)
            plt.title(f"{car_type} track")
            plt.xlabel('U (pixel)')
            plt.ylabel('V (pixel)')
            plt.savefig(join(dirname(dirname(realpath(__file__))),f'result/track_image/{i}.pdf'))
            plt.close()

        return 0
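A minimal usage sketch for CalculateSpeed, assuming the module-level imports above and the default CSV path; note that get_speed() must be called before get_df(), because get_df() reads self.speeds:

cs = CalculateSpeed(start_time_stamp='2018-01-01T08:00')
speeds = cs.get_speed()      # one average speed (km/h) per tracked vehicle
df = cs.get_df(save=False)   # DataFrame with time / speed / car_type columns
print(df.head())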
Code example #11
        left_fit, right_fit = polyfit.fit_polynomial(left_fit_prev,
                                                     right_fit_prev)

        # Warp image with lanes back
        out_image = transform.warp_back(undist, polyfit)

        # Curvature and offset
        curvature = polyfit.curvature()
        offset = polyfit.offset()

        return out_image, left_fit, right_fit, curvature, offset


if __name__ == "__main__":
    # Assumed module-level imports from the full file: os and CameraCalibration.
    # Undistortion parameters
    undis_param = CameraCalibration.load_pickle_with_undist_params(
        "camera_cal/wide_dist_pickle.p")

    # Threshold parameters
    ksize = 3  # Sobel kernel size
    s_thresh_img = (170, 255)
    s_thresh_vid = (80, 200)
    sx_thresh = (20, 200)
    sy_thresh = (20, 200)
    m_thresh = (30, 120)
    d_thresh = (0.7, 1.3)

    # Input/output folders of test images
    in_folder = os.getcwd() + "/test_images/"
    out_folder = os.getcwd() + "/output_images/"

    # Detect lanes on test images and write to out folder
Code example #12
    @staticmethod
    def scaled_sobel(abs_sobel):
        return np.uint8(255 * abs_sobel / np.max(abs_sobel))

    @staticmethod
    def create_binary(image):
        return np.zeros_like(image)

    @staticmethod
    def region_of_interest(img, vertices):
        mask = np.zeros_like(img)
        if len(img.shape) > 2:
            channel_count = img.shape[2]
            ignore_mask_color = (255, ) * channel_count
        else:
            ignore_mask_color = 255
        cv2.fillPoly(mask, vertices, ignore_mask_color)
        masked_image = cv2.bitwise_and(img, mask)
        return masked_image


run = False

if run:
    from moviepy.editor import VideoFileClip  # assumed import from the full file
    camcal = CameraCalibration()
    camcal.calibrate_camera()
    det = LaneLineDetector(camcal)

    output = 'harder_challenge_video_submit.mp4'
    clip1 = VideoFileClip('harder_challenge_video.mp4')
    clip = clip1.fl_image(det.pipeline)
    clip.write_videofile(output, audio=False)
Code example #13
class CalibrationIface(PlayerInterface):
    """
    Implements the PlayerInterface class with an ImageListVideoStream
    It uses the CameraCalibration class to compute the camera matrix from a set of images containing a
    chessboard pattern.
    """
    def __init__(self,
                 app,
                 context,
                 parent,
                 params,
                 displayName,
                 providerName,
                 timerSpeed=200):
        PlayerInterface.__init__(self, app, context, parent, params,
                                 displayName, providerName, timerSpeed)

        self.nColumns = 9
        self.nRows = 6
        self.matrixType = 'normal'

        self._setDisplay()
        self._setDisplay()

    @pyqtSlot()
    def calibrate(self):
        """
        Compute the camera matrix 
        """
        self.calib = CameraCalibration(self.nColumns, self.nRows)
        self.calib.calibrate(self.srcFolder)
        self.params.calib = self.calib

        self.nFrames = len(self.calib.srcImgs)

        self.stream = ImageListVideoStream(self.calib.srcImgs)
        self._setDisplay()
        self._setDisplayMax()

        self._updateImgProvider()

    @pyqtSlot(result=QVariant)
    def getNRows(self):
        """
        Get the number of inner rows in the chessboard pattern
        """
        return self.nRows

    @pyqtSlot(QVariant)
    def setNRows(self, nRows):
        """
        Set the number of inner rows in the chessboard pattern
        """
        self.nRows = int(nRows)

    @pyqtSlot(result=QVariant)
    def getNColumns(self):
        """
        Get the number of inner columns in the chessboard pattern
        """
        return self.nColumns

    @pyqtSlot(QVariant)
    def setNColumns(self, nColumns):
        """
        Set the number of inner columns in the chessboard pattern
        """
        self.nColumns = int(nColumns)

    @pyqtSlot(result=QVariant)
    def getFolderPath(self):
        """
        Get the path to the folder where the images with the pattern are stored
        """
        diag = QFileDialog()
        srcFolder = diag.getExistingDirectory(parent=diag,
                                              caption="Chose directory",
                                              directory=os.getenv('HOME'))
        self.srcFolder = srcFolder
        return srcFolder

    @pyqtSlot(QVariant)
    def setMatrixType(self, matrixType):
        """
        Set the matrix type to be saved. Resolution independent (normal) or dependent (optimized)
        
        :param string matrixType: The type of matrix to be saved. One of ['normal', 'optimized']
        """
        matrixType = matrixType.lower()
        if matrixType not in ['normal', 'optimized']:
            raise KeyError(
                "Expected one of ['normal', 'optimized'], got {}".format(
                    matrixType))
        else:
            self.matrixType = matrixType

    @pyqtSlot()
    def saveCameraMatrix(self):
        """
        Save the camera matrix selected as self.matrixType
        """
        diag = QFileDialog()
        destPath = diag.getSaveFileName(parent=diag,
                                        caption='Save matrix',
                                        directory=os.getenv('HOME'),
                                        filter='Numpy (.npy)')
        destPath = destPath[0]
        if destPath:
            if self.matrixType == 'normal':
                np.save(destPath, self.cameraMatrix)
            elif self.matrixType == 'optimized':
                np.save(destPath, self.optimalCameraMatrix)

    @pyqtSlot(QVariant)
    def setFrameType(self, frameType):
        """
        Select the type of frame to be displayed (before, during, or after distortion correction)
        
        :param string frameType: The selected frame type. One of ['source', 'detected', 'corrected']
        """
        frameType = frameType.lower()
        currentIndex = self.stream.currentFrameIdx
        if frameType == "source":
            imgs = self.calib.srcImgs
        elif frameType == "detected":
            imgs = self.calib.detectedImgs
        elif frameType == "corrected":
            imgs = self.calib.correctedImgs
        else:
            raise KeyError(
                "Expected one of ['source', 'detected', 'corrected'], got {}".
                format(frameType))
        self.stream = ImageListVideoStream(imgs)
        self._updateImgProvider()
        self.stream.currentFrameIdx = self._validateFrameIdx(
            currentIndex - 1)  # reset to previous position
        self.getImg()
Code example #14
import numpy as np
import rospy
from geometry_msgs.msg import Pose, Point, PointStamped
# CameraCalibration and the TrackPose service type come from the
# surrounding package (module paths assumed).


class TrackerInterface:
    def __init__(self, defaultQuat, service_name='update_pose_target'):
        self.cameraCalib = CameraCalibration()
        rospy.wait_for_service(service_name, timeout=None)
        self._update_target = rospy.ServiceProxy(service_name, TrackPose)
        self.pose_target_listener = None
        self.mouth_target_listener = None
        self.defaultQuat = defaultQuat

    #### PUBLIC METHODS
    # we try to make it so that all of these methods are idempotent
    # and can be called from any state
    def start_updating_target_to_pose(self,
                                      target_pose_topic,
                                      robot_coord_offset=[0, 0, 0]):
        self.stop_moving()
        self.pose_target_listener = rospy.Subscriber(
            target_pose_topic, Pose, self._update_target_pose_robot_frame,
            (robot_coord_offset))

    def start_updating_target_to_point(self,
                                       mouth_point_topic,
                                       robot_coord_offset=[0, 0, 0]):
        self.stop_moving()
        self.mouth_target_listener = rospy.Subscriber(
            mouth_point_topic, PointStamped, self._update_target_camera_frame,
            (robot_coord_offset))

    def start_tracking_fixed_pose(self, robot_coord_point, robot_coord_quat):
        self.stop_moving()
        # you just send the target point, you don't need to continually update it
        self._update_target(target=Pose(
            Point(robot_coord_point[0], robot_coord_point[1],
                  robot_coord_point[2]), robot_coord_quat))

    def start_tracking_fixed_target(self, robot_coord_point):
        self.start_tracking_fixed_pose(robot_coord_point, self.defaultQuat)

    def stop_moving(self):
        self._stop_updating_target()
        self._update_target(stopMotion=True)

    #### PRIVATE METHODS #####
    def _stop_updating_target(self):
        if (self.mouth_target_listener is not None):
            self.mouth_target_listener.unregister()
        if (self.pose_target_listener is not None):
            self.pose_target_listener.unregister()

    # return an np.array of the [x,y,z] target mouth location
    # in the coordinate frame of the robot base
    def _convert_camera_to_robot_frame(self, mouth_pos):
        t = mouth_pos.point
        point_in_camera_frame = np.array([t.x, t.y, t.z, 1])
        point_in_robot_frame = self.cameraCalib.convert_to_robot_frame(
            point_in_camera_frame)
        return point_in_robot_frame[0:3]

    # compute and move toward the target mouth location
    # only move for at most timeoutSecs,
    def _update_target_camera_frame(self,
                                    mouth_pos,
                                    robot_coord_offset=[0, 0, 0]):
        endLoc = self._convert_camera_to_robot_frame(mouth_pos) + np.array(
            robot_coord_offset)
        self._update_target(target=Pose(Point(endLoc[0], endLoc[1], endLoc[2]),
                                        self.defaultQuat))

    # move toward the target spoon pose
    def _update_target_pose_robot_frame(self,
                                        target_pose,
                                        robot_coord_offset=[0, 0, 0]):
        newPosition = Point(target_pose.position.x + robot_coord_offset[0],
                            target_pose.position.y + robot_coord_offset[1],
                            target_pose.position.z + robot_coord_offset[2])
        self._update_target(target=Pose(newPosition, target_pose.orientation))
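A hedged usage sketch for TrackerInterface, assuming a running ROS node and the 'update_pose_target' service from the snippet; the topic name below is illustrative only:

import rospy
from geometry_msgs.msg import Quaternion

rospy.init_node('tracker_demo')
default_quat = Quaternion(0.0, 0.0, 0.0, 1.0)  # identity orientation
tracker = TrackerInterface(default_quat)

# Follow a mouth point published on a (hypothetical) topic, offset 5 cm in -x
tracker.start_updating_target_to_point('/mouth_point', robot_coord_offset=[-0.05, 0, 0])
# ...later, stop all motion (idempotent, callable from any state per the comments above)
tracker.stop_moving()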
Code example #15
    grad_color_stack, img = color_gradient_threshold_pipeline(image, img_processor)
    birds_eye, M, Minv = calibration.perspective_transform(img, src, dst)
    undist = calibration.undistort(image)
    search, left_fitx, right_fitx, ploty, image_final = find_lane_lines.find(birds_eye, undist, Minv, left_line, right_line)

    return image_final


# __main__
# Assumed module-level imports from the full file: matplotlib.image as mpimg,
# numpy as np, and the project's ImageProcessor / find_lane_lines modules.

# Camera Calibration
img = mpimg.imread('camera_cal/calibration1.jpg')
img_size = (img.shape[1], img.shape[0])

calibration = CameraCalibration(chessboard_images_dir='camera_cal', pattern_size=(9,6))
calibration.calibrate_camera(img_size)

# Image Processor
img_processor = ImageProcessor()

# src and dst for perspective transform
test_img = mpimg.imread('test_images/straight_lines2.jpg')
test_img_size = (test_img.shape[1], test_img.shape[0])

src = np.float32([[580,460], [710,460], [1150,720], [150,720]])
offset = 200
dst = np.float32([ [offset, 0],
                   [test_img_size[0]-offset, 0],
                   [test_img_size[0]-offset, test_img_size[1]-0],
                   [offset, test_img_size[1]-0]])
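calibration.perspective_transform itself is not shown; as an assumption about what it wraps, the src/dst quadrilaterals above would feed plain OpenCV like this:

import cv2
M = cv2.getPerspectiveTransform(src, dst)     # image -> bird's-eye view
Minv = cv2.getPerspectiveTransform(dst, src)  # bird's-eye view -> image
birds_eye = cv2.warpPerspective(test_img, M, test_img_size, flags=cv2.INTER_LINEAR)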
Code example #16
                continue
        gtDistances += cv_dists
        tofDistances += tof_dists
        stereoDistances += stereo_dists
        # print("-----image ", fname, "-----")
        # for i in range(len(cv_dists)):
        #     print('corner {:02d} has gt = {:.1f}, tof = {:.1f}, stereo = {:.1f}'.format(
        #         i, cv_dists[i], tof_dists[i], stereo_dists[i]))
    return [gtDistances, tofDistances, stereoDistances]


if __name__ == '__main__':
    square_size = 26.3571
    width = 6
    height = 5
    cal_obj = CameraCalibration(square_size, width, height)
    rgbModel = 'rgbCamera.yml'
    tofModel = 'irCamera.yml'
    stereoModel = 'stereo.yml'
    cal_obj.load_cameraModel(rgbModel, tofModel, stereoModel)

    dirpath = '../data/bias_calibration/corner*'
    gtDistances, tofs, stereo_distances = calibrateDepth(
        cal_obj,
        dirpath,
        irPrefix='Gray_',
        depthPrefix='DepthImage_',
        rgbPrefix='RBG_',
        ifVisualization=False,
        ifDivide16=True,
        ifUseRGB=True)