Example #1
    def __init__(self, img):
        self.image = img
        self.HEIGHT = img.shape[0]
        self.WIDTH = img.shape[1]
        self.cte = 0
        self.center_line = Line(self.WIDTH / 2, self.HEIGHT, self.WIDTH / 2, self.HEIGHT / 2)
        self.angle_steer = 0
        self.speed = 10
        self.dt = 0.1
        self.yaw = 90   #  phi(center,ox)

        # initialize MPC
        x_start = 140
        y_start = 240
        yaw = -1.4489
        v = 10

        # region of interest: full image width, from 15% of the height down
        self.vertices = np.array([[
            (0, img.shape[0]),
            (0, int(0.15 * img.shape[0])),
            (img.shape[1], int(0.15 * img.shape[0])),
            (img.shape[1], img.shape[0]),
        ]], dtype=np.int32)
        #img = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
        self.final_img, self.left_line, self.right_line = LD.color_frame_pipeline([self.image], solid_lines=True)
        self.speed_pub = rospy.Publisher("Team1_speed", Float32, queue_size=10)
        self.steer_pub = rospy.Publisher("Team1_steerAngle", Float32, queue_size=10)
        # start_time = time()
        #self.sign_image, self.DIRECTION = TD.detect_sign(img)
        # end_time = time()
        # duration = end_time-start_time
        # print(duration,"duration")
        rate = rospy.Rate(30)
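
The constructor creates the two ROS publishers and a 30 Hz rate but the excerpt ends before they are used. A minimal sketch of a publish loop that could follow, assuming the controller simply republishes the current speed and steering angle each cycle (the method name is hypothetical, not from the source):

    def publish_control(self):
        # hypothetical helper: republish current speed and steering at 30 Hz
        rate = rospy.Rate(30)
        while not rospy.is_shutdown():
            self.speed_pub.publish(Float32(self.speed))
            self.steer_pub.publish(Float32(self.angle_steer))
            rate.sleep()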
Example #2
def open_zed():
    print("Running...")
    init = sl.InitParameters()
    cam = sl.Camera()
    if not cam.is_opened():
        print("Opening ZED Camera...")
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit()

    runtime = sl.RuntimeParameters()
    mat = sl.Mat()

    key = -1
    while key != ord('q'):
        err = cam.grab(runtime)
        if err == sl.ERROR_CODE.SUCCESS:
            cam.retrieve_image(mat, sl.VIEW.VIEW_LEFT)
            # mat.get_data() already returns a numpy array; cv2.imread expects
            # a file path, so use the buffer directly (ZED frames are BGRA)
            instance_image = mat.get_data()
            in_image = cv2.cvtColor(instance_image, cv2.COLOR_BGRA2RGB)
            out_image = color_frame_pipeline([in_image], solid_lines=True)
            # os.getcwd() alone is a directory; imwrite needs a file name
            # (the name below is illustrative)
            out_path = os.path.join(os.getcwd(), 'zed_lane_output.jpg')
            cv2.imwrite(out_path, cv2.cvtColor(out_image, cv2.COLOR_RGB2BGR))
            plt.imshow(out_image)

            plt.waitforbuttonpress()
            plt.close('all')

            # cv2.imshow("ZED", mat.get_data())
            key = cv2.waitKey(5)
            settings(key, cam, runtime, mat)
        else:
            key = cv2.waitKey(5)
    cv2.destroyAllWindows()

    cam.close()
    print("\nFINISH")
Example #3
        figManager.resize(*figManager.window.maxsize())

    # test on images
    test_images_dir = join('data', 'test_images')
    test_images = [
        join(test_images_dir, name) for name in os.listdir(test_images_dir)
    ]

    for test_img in test_images:

        print('Processing image: {}'.format(test_img))

        out_path = join('out', 'images', basename(test_img))
        in_image = cv2.cvtColor(cv2.imread(test_img, cv2.IMREAD_COLOR),
                                cv2.COLOR_BGR2RGB)
        out_image = color_frame_pipeline([in_image], solid_lines=True)
        cv2.imwrite(out_path, cv2.cvtColor(out_image, cv2.COLOR_RGB2BGR))
        if verbose:
            plt.imshow(out_image)
            plt.waitforbuttonpress()
    plt.close('all')

    # test on videos
    test_videos_dir = join('data', 'test_videos')
    test_videos = [
        join(test_videos_dir, name) for name in os.listdir(test_videos_dir)
    ]

    for test_video in test_videos:

        print('Processing video: {}'.format(test_video))
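
The excerpt stops as the video loop begins. A sketch of how the per-frame processing could continue, assuming a short frame buffer feeds the pipeline's temporal smoothing (buffer length and variable names are illustrative, not from the source):

        cap = cv2.VideoCapture(test_video)
        frame_buffer = []
        while True:
            ret, color_frame = cap.read()
            if not ret:
                break
            # OpenCV decodes to BGR; the pipeline expects RGB
            color_frame = cv2.cvtColor(color_frame, cv2.COLOR_BGR2RGB)
            frame_buffer.append(color_frame)
            if len(frame_buffer) > 10:
                frame_buffer.pop(0)
            blend_frame = color_frame_pipeline(frames=frame_buffer,
                                               solid_lines=True,
                                               temporal_smoothing=True)
        cap.release()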
Example #4
import matplotlib.pyplot as plt
import cv2
import os
from os.path import join, basename
from lane_detection import color_frame_pipeline


RESIZE_H = 540
RESIZE_W = 960
WORKING_DIR = '/Users/dwang/self-driving-car/'
verbose = True


if __name__ == '__main__':
    if verbose:
        plt.ion()

    test_images_dir = join(WORKING_DIR, 'project_1_lane_finding_basic/data/test_images/')
    test_images = [join(test_images_dir, name) for name in os.listdir(test_images_dir)]

    for test_img in test_images:
        out_path = join(WORKING_DIR, 'out/images', basename(test_img))
        print "processing image: {} to {}".format(test_img, out_path)
        in_image = cv2.cvtColor(cv2.imread(test_img, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
        out_image = color_frame_pipeline([in_image], solid_lines=True)
        cv2.imwrite(out_path, cv2.cvtColor(out_image, cv2.COLOR_RGB2BGR))
    plt.close('all')
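
Note that this script enables interactive plotting with plt.ion() and closes all figures at the end, but never draws anything in between. To preview each annotated image as the other examples do, a display step could be added inside the loop (a sketch mirroring Example #3, not in the original):

        if verbose:
            plt.imshow(out_image)
            plt.waitforbuttonpress()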
Example #5
    # test on images
    test_images_dir = join('data', 'Our_test_images')
    test_images = [
        join(test_images_dir, name) for name in os.listdir(test_images_dir)
    ]

    for test_img in test_images:

        print('Processing image: {}'.format(test_img))

        out_path = join('out', 'images', basename(test_img))
        in_image = cv2.cvtColor(cv2.imread(test_img, cv2.IMREAD_COLOR),
                                cv2.COLOR_BGR2RGB)
        out_image = color_frame_pipeline([in_image], solid_lines=True)
        cv2.imwrite(out_path, cv2.cvtColor(out_image, cv2.COLOR_RGB2BGR))
        if verbose:
            plt.imshow(out_image)
            plt.waitforbuttonpress()
    plt.close('all')
    # test on videos
    test_videos_dir = join('data', 'test_videos')
    test_videos = [
        join(test_videos_dir, name) for name in os.listdir(test_videos_dir)
    ]
    # NOTE: the unmatched triple-quote below disables the video loop in the
    # original source; its closing counterpart lies beyond this excerpt
'''
    for test_video in test_videos:

        print('Processing video: {}'.format(test_video))
Example #6
    (h, w) = frame.shape[:2]
    r = 320 / float(w)
    dim = (320, int(h * r))
    frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
    # crop to 320 x 180 to keep the wide part of the frame
    frame = frame[0:180, 0:320]
    frame = cv2.rectangle(frame, (265, 105), (305, 55), (0, 255, 0), 2)
    # crop for CNN model, i.e., traffic sign location
    # can be adjusted based on camera angle
    image = frame[55:105, 265:305]

    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    #frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    frame_buffer.append(frame)
    blend_frame, lane_lines = color_frame_pipeline(frames=frame_buffer,
                                                   solid_lines=True,
                                                   temporal_smoothing=True)

    # prepare the image to be classified by our deep learning network
    image = cv2.resize(image, (28, 28), interpolation=cv2.INTER_AREA)
    #cv2.imshow("Image",image)
    image = image.astype("float") / 255.0
    image = img_to_array(image)
    image = np.expand_dims(image, axis=0)

    # classify the input image and initialize the label and
    # probability of the prediction
    low_speed, RR, signal, high_speed, stop, yield1 = model.predict(image)[0]
    label = "Not Stop"