Example #1
def main():
    global theta_div
    cap = cv2.VideoCapture(0)
    ticks = 0
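    # LaneTracker and LaneDetector come from the local track/detect modules used
    # by all of these examples; the arguments are presumably the lane count plus
    # Kalman noise scales, and the road-horizon row for the detector.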
    lt = track.LaneTracker(2, 0.1, 150)
    ld = detect.LaneDetector(120)
    while cap.isOpened():
        prec_tick = ticks
        ticks = cv2.getTickCount()
        dt = (ticks - prec_tick) / cv2.getTickFrequency()

        ret, frame = cap.read()

        predicted = lt.predict(dt)

        lanes = ld.detect(frame)

        if predicted is not None:
            cv2.line(frame, (predicted[0][0], predicted[0][1]),
                     (predicted[0][2], predicted[0][3]), (0, 0, 255), 5)
            cv2.line(frame, (predicted[1][0], predicted[1][1]),
                     (predicted[1][2], predicted[1][3]), (0, 0, 255), 5)
            theta1 = np.arctan2((predicted[0][3] - predicted[0][1]),
                                (predicted[0][2] - predicted[0][0]))
            theta2 = np.arctan2((predicted[1][3] - predicted[1][1]),
                                (predicted[1][2] - predicted[1][0]))
            div = (np.pi - np.abs(theta1) - np.abs(theta2)) / 2
            theta_div = (np.pi - np.abs(theta1) - div) - np.pi / 2
        lt.update(lanes)
        cv2.imshow('', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
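The theta_div computation above reduces the two predicted lane lines to a single steering deviation: np.arctan2 gives each lane's slope angle, the wedge left between the two lanes is halved, and the bisector's offset from vertical (pi/2) becomes the deviation. A minimal standalone sketch of that arithmetic, using made-up endpoints (the coordinates are illustrative, not values from the example):

import numpy as np

# Illustrative lane endpoints as (x1, y1, x2, y2); not taken from the example.
left_lane = (100, 400, 300, 200)   # rises toward the image center
right_lane = (500, 200, 700, 400)  # falls away from the image center

theta1 = np.arctan2(left_lane[3] - left_lane[1], left_lane[2] - left_lane[0])
theta2 = np.arctan2(right_lane[3] - right_lane[1], right_lane[2] - right_lane[0])

# Half of the angle left over after removing both lane angles from pi.
div = (np.pi - np.abs(theta1) - np.abs(theta2)) / 2

# Offset of the wedge bisector from vertical; 0 when the lanes are symmetric.
theta_div = (np.pi - np.abs(theta1) - div) - np.pi / 2
print(np.degrees(theta_div))  # 0.0 for these symmetric endpoints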
Example #2
    def main(self):
        # cap = cv2.VideoCapture(0)
        cap = cv2.VideoCapture('project_video.mp4')
        ticks = 0

        lt = track.LaneTracker(2, 0.1, 500)
        ld = detect.LaneDetector(180)
        while cap.isOpened():
            precTick = ticks
            ticks = cv2.getTickCount()
            dt = (ticks - precTick) / cv2.getTickFrequency()

            ret, frame = cap.read()
            predicted = lt.predict(dt)
            lanes = ld.detect(frame)

            if predicted is not None:
                cv2.line(frame, (predicted[0][0], predicted[0][1]),
                         (predicted[0][2], predicted[0][3]), (0, 0, 255), 7)
                cv2.line(frame, (predicted[1][2], predicted[1][3]),
                         (predicted[1][0], predicted[1][1]), (0, 0, 255), 7)

                # int() so the midpoints are valid cv2 pixel coordinates
                center_coordinates_top = (
                    int(abs(predicted[1][0] - predicted[0][2]) / 2 +
                        predicted[0][2]), predicted[1][1])
                center_coordinates_bottom = (
                    int(abs(predicted[0][0] - predicted[1][2]) / 2 +
                        predicted[0][0]), predicted[0][1])

                cv2.line(frame, center_coordinates_top,
                         center_coordinates_bottom, (0, 255, 0), 7)

                error = frame.shape[1] / 2 - (
                    abs(predicted[0][0] - predicted[1][2]) / 2 +
                    predicted[0][0])
                a = [0, 352]  # bottom-left corner, assuming a 352-row frame
                b = [
                    center_coordinates_bottom[0], center_coordinates_bottom[1]
                ]
                c = [center_coordinates_top[0], center_coordinates_top[1]]

                ba = np.subtract(a, b)
                bc = np.subtract(c, b)

                cosine_angle = np.dot(
                    ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
                angle = 90. - np.degrees(np.arccos(cosine_angle))

                cv2.putText(frame, 'Crosstrack Error:' + str('%.2f' % error),
                            (20, 70), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                            (255, 255, 255), 2)
                cv2.putText(frame, 'Angle:' + str('%.2f' % angle), (20, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)

            lt.update(lanes)
            try:
                self.lane_image_pub.publish(
                    self.bridge.cv2_to_imgmsg(frame, "bgr8"))
            except CvBridgeError as e:
                print(e)
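The angle shown on the frame comes from the law of cosines between the lane-center segment and a reference point at [0, 352] (which assumes a 352-row frame). A self-contained sketch of the same computation with illustrative coordinates, where b and c stand in for the lane-center bottom and top:

import numpy as np

a = np.array([0, 352])    # reference: bottom-left corner of an assumed 352-row frame
b = np.array([320, 352])  # illustrative lane-center bottom point
c = np.array([300, 176])  # illustrative lane-center top point

ba, bc = a - b, c - b
cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
# Subtracting the angle at b from 90 degrees converts it into a tilt from vertical.
angle = 90.0 - np.degrees(np.arccos(cosine_angle))
print('%.2f' % angle)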
Example #3
def main():
    images = [
        cv2.imread(file) for file in glob.glob(r"S:\CNN\Matlab\data\*.png")
    ]

    ticks = 0

    lt = track.LaneTracker(2, 0.1, 500)
    ld = detect.LaneDetector(180)
    for frame in images:

        precTick = ticks
        ticks = cv2.getTickCount()
        dt = (ticks - precTick) / cv2.getTickFrequency()
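        # Even without a video stream, dt is the wall-clock time spent on the
        # previous image, so the Kalman predict step still gets a usable delta.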

        predicted = lt.predict(dt)

        lanes = ld.detect(frame)

        if predicted is not None:
            cv2.line(frame, (predicted[0][0], predicted[0][1]),
                     (predicted[0][2], predicted[0][3]), (0, 0, 255), 5)
            cv2.line(frame, (predicted[1][0], predicted[1][1]),
                     (predicted[1][2], predicted[1][3]), (0, 0, 255), 5)

        lt.update(lanes)

        cv2.imshow('', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Example #4
def main(video_path):
    cap = cv2.VideoCapture(video_path)

    ticks = 0

    lt = track.LaneTracker(2, 0.1, 500)
    ld = detect.LaneDetector(180)
    while cap.isOpened():
        precTick = ticks
        ticks = cv2.getTickCount()
        dt = (ticks - precTick) / cv2.getTickFrequency()

        ret, frame = cap.read()

        predicted = lt.predict(dt)

        lanes = ld.detect(frame)

        if predicted is not None:
            cv2.line(frame, (predicted[0][0], predicted[0][1]),
                     (predicted[0][2], predicted[0][3]), (0, 0, 255), 5)
            cv2.line(frame, (predicted[1][0], predicted[1][1]),
                     (predicted[1][2], predicted[1][3]), (0, 0, 255), 5)

        lt.update(lanes)

        cv2.imshow('', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Example #5
def main(video_path):
    cap = cv2.VideoCapture(video_path)

    ticks = 0

    lt = track.LaneTracker(2, 0.1, 500)
    ld = detect.LaneDetector(0)
    while cap.isOpened():
        precTick = ticks
        ticks = cv2.getTickCount()
        dt = (ticks - precTick) / cv2.getTickFrequency()
        dt *= 0.2  # damp the time step to slow the filter's predicted motion

        ret, frame = cap.read()
        frame = cv2.resize(frame, (1280, 720), interpolation=cv2.INTER_CUBIC)

        time1 = time.time()
        predicted = lt.predict(dt)

        lanes, frame2 = ld.detect(frame)
        print(time.time() - time1)
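        # Trapezoidal region of interest expressed as fractions of the frame
        # size; drawn below purely for visualization.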
        imshape = frame.shape
        vertices = np.array([[(.55 * imshape[1], 0.67 * imshape[0]),
                              (imshape[1], imshape[0]), (0, imshape[0]),
                              (.45 * imshape[1], 0.67 * imshape[0])]],
                            dtype=np.int32)
        cv2.polylines(frame, [vertices], True, (0, 255, 0), 3)

        predicted2 = []
        if lanes.count(None) == 0:
            # print(lanes)
            predicted2.append(list(map(int, lanes[0])))
            predicted2.append(list(map(int, lanes[1])))
        else:
            predicted2 = None

        if predicted is not None:
            cv2.line(frame, (int(predicted[0][0]), int(predicted[0][1])),
                     (int(predicted[0][2]), int(predicted[0][3])), (0, 0, 255),
                     5)
            cv2.line(frame, (int(predicted[1][0]), int(predicted[1][1])),
                     (int(predicted[1][2]), int(predicted[1][3])), (0, 0, 255),
                     5)
        # if predicted2 is not None:
        #     cv2.line(frame, (int(predicted2[0][0]), int(predicted2[0][1])),
        #              (int(predicted2[0][2]), int(predicted2[0][3])), (255, 0, 0), 5)
        #     cv2.line(frame, (int(predicted2[1][0]), int(predicted2[1][1])),
        #              (int(predicted2[1][2]), int(predicted2[1][3])), (255, 0, 0), 5)

        lt.update(lanes)

        cv2.imshow('', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Example #6
def main():
    global pos
    pos = 0
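    # pos is read by pid_thread; every write below is guarded by the
    # module-level lock.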
    # t1 = threading.Thread(target=camera_thread)
    t2 = threading.Thread(target=pid_thread)
    # t1.start()
    t2.start()
    # t1.join()
    cap = cv2.VideoCapture(0)
    ticks = 0
    lt = track.LaneTracker(2, 0.1, 150)
    ld = detect.LaneDetector(120)
    while cap.isOpened():
        prec_tick = ticks
        ticks = cv2.getTickCount()
        dt = (ticks - prec_tick) / cv2.getTickFrequency()

        ret, frame = cap.read()

        predicted = lt.predict(dt)

        lanes = ld.detect(frame)

        if predicted is not None:
            cv2.line(frame, (predicted[0][0], predicted[0][1]), (predicted[0][2], predicted[0][3]), (0, 0, 255), 5)
            cv2.line(frame, (predicted[1][0], predicted[1][1]), (predicted[1][2], predicted[1][3]), (0, 0, 255), 5)
            theta1 = np.arctan2((predicted[0][3] - predicted[0][1]), (predicted[0][2] - predicted[0][0]))
            theta2 = np.arctan2((predicted[1][3] - predicted[1][1]), (predicted[1][2] - predicted[1][0]))
            div = (np.pi - np.abs(theta1) - np.abs(theta2)) / 2
            lock.acquire()
            try:
                pos = ((np.pi - np.abs(theta1) - div) - np.pi / 2) * 50 / np.pi
            finally:
                lock.release()
        lt.update(lanes)
        cv2.imshow('', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    t2.join()
Example #7
def main(input_video_path):
    # input video
    cap = cv2.VideoCapture(input_video_path)
    # initialize detector and tracker here
    lt = track.LaneTracker(2, 0.1, 500)
    ld = detect.LaneDetector(180)
    # initialize time tick
    ticks = 0

    while cap.isOpened():
        # read frame
        ret, frame = cap.read()
        # start count tick
        precTick = ticks
        ticks = cv2.getTickCount()
        dt = (ticks - precTick) / cv2.getTickFrequency()
        # start predict and detect lane
        # Kalman filter predict step
        predicted = lt.predict(dt)
        lanes = ld.detect(frame)

        if predicted is not None:
            cv2.line(frame, (predicted[0][0], predicted[0][1]),
                     (predicted[0][2], predicted[0][3]), (0, 0, 255), 5)
            cv2.line(frame, (predicted[1][0], predicted[1][1]),
                     (predicted[1][2], predicted[1][3]), (0, 0, 255), 5)
        # Kalman filter update step
        lt.update(lanes)

        cv2.imshow('', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
Example #8
def main(video_path):
    cap = cv2.VideoCapture(video_path)

    ret, img = cap.read()
    height, width, layer = img.shape
    print(height, width, layer)

    screen_rate = 0.4
    width = int(width * screen_rate)
    height = int(height * screen_rate)

    ticks = 0
    font = cv2.FONT_HERSHEY_SIMPLEX  # used by the putText calls below

    lt = track.LaneTracker(2, 0.1, 500)
    ld = detect.LaneDetector(180)
    while cap.isOpened():
        precTick = ticks
        ticks = cv2.getTickCount()
        dt = (ticks - precTick) / cv2.getTickFrequency()

        ret, frame = cap.read()
        frame = cv2.resize(frame,
                           None,
                           fx=screen_rate,
                           fy=screen_rate,
                           interpolation=cv2.INTER_CUBIC)

        predicted = lt.predict(dt)

        lanes = ld.detect(frame)

        # image center (vertical reference line)
        cv2.line(frame, (int(width / 2), 0), (int(width / 2), height),
                 (255, 255, 255), 2)

        if predicted is not None:
            Ax = predicted[0][2]
            Ay = predicted[0][3]

            Bx = predicted[0][0]
            By = predicted[0][1]

            Cx = predicted[1][2]
            Cy = predicted[1][3]

            Dx = predicted[1][0]
            Dy = predicted[1][1]
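            # A-B and C-D are the two predicted lane lines; the quad A-B-C-D
            # below is filled as the detected lane area.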

            # corners
            cv2.line(frame, (Ax, Ay), (Bx, By), (0, 255, 255), 5)
            cv2.line(frame, (Cx, Cy), (Dx, Dy), (0, 255, 255), 5)

            # lane area polygon
            ic = np.array([[Ax, Ay], [Bx, By], [Cx, Cy], [Dx, Dy]], np.int32)

            # center of the area
            # // keeps the midpoints integral so the cv2 drawing calls get
            # valid pixel coordinates
            cv2.line(frame, ((Ax + Dx) // 2, (Ay + Dy) // 2),
                     ((Bx + Cx) // 2, (By + Cy) // 2), (0, 0, 255), 5)

            cv2.circle(frame, ((Ax + Dx) // 2, (Ay + Dy) // 2), 5, (255, 0, 0),
                       -1)

            cv2.line(frame, ((Ax + Dx) // 2, (Ay + Dy) // 2),
                     (int(width / 2), (Ay + Dy) // 2), (0, 255, 0), 3)

            veri = "s" + str((Ax + Dx) // 2 - width // 2)
            cv2.putText(frame, veri, ((Ax + Dx) // 2, (Ay + Dy) // 2), font,
                        0.5, (255, 255, 255), 1, cv2.LINE_AA)

            # corner labels
            cv2.putText(frame, 'A', (Ax, Ay), font, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)
            cv2.putText(frame, 'B', (Bx, By), font, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)
            cv2.putText(frame, 'C', (Cx, Cy), font, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)
            cv2.putText(frame, 'D', (Dx, Dy), font, 0.5, (255, 255, 255), 1,
                        cv2.LINE_AA)

            overlay = frame.copy()
            cv2.fillPoly(overlay, [ic], (100, 50, 50, 50))
            opacity = 0.4
            cv2.addWeighted(overlay, opacity, frame, 1 - opacity, 0, frame)

        lt.update(lanes)

        cv2.imshow('output', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Example #9
from __future__ import division

import cv2
import numpy as np
import track
import detect
import time
from grabscreen import grab_screen

ticks = 0
road_horizon = 320
lt = track.LaneTracker()
vertices = np.array([[0, 256], [0, 220], [220, 150], [292, 150], [512, 200], [512, 256]], np.int32)
ld = detect.LaneDetector(vertices, road_horizon)  # horizon line y coordinate
with open("Output.txt", "w") as text_file:
    while True:
            begin=time.time()
            precTick = ticks
            ticks = cv2.getTickCount()
            dt = (ticks - precTick) / cv2.getTickFrequency()

            frame = np.array(grab_screen(region=(10, 40, 810, 640)))
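            # grab_screen (from grabscreen.py) captures a desktop region; the
            # tuple is presumably (left, top, right, bottom) in screen pixels.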
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            #frame = cv2.imread('C:/Users/shun.yang/Desktop/lane_detect/track/test.png')
            #frame = cv2.imread('C:/Users/shun.yang/Desktop/lane_detect/lane_alex/img/1.png')
            predicted = lt.predict(dt)

            lanes = ld.detect(frame)

            if predicted is not None:
                cv2.line(frame, (predicted[0][0], predicted[0][1]), (predicted[0][2], predicted[0][3]), (0, 0, 255), 5)
                cv2.line(frame, (predicted[1][0], predicted[1][1]), (predicted[1][2], predicted[1][3]), (0, 0, 255), 5)

            lt.update(lanes)

            cv2.imshow('', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
Example #10
def main(video_path):
    # cap = cv2.VideoCapture(0)
    cap = cv2.VideoCapture('project_video.mp4')

    ticks = 0

    lt = track.LaneTracker(2, 0.1, 500)
    ld = detect.LaneDetector(180)
    while cap.isOpened():
        precTick = ticks
        ticks = cv2.getTickCount()
        dt = (ticks - precTick) / cv2.getTickFrequency()

        ret, frame = cap.read()
        # frame = cv2.flip( frame, -1 )

        # pts1 = np.float32([[0, 0], [640, 0], [640, 352], [0, 352]])
        # pts2 = np.float32([[300, 176], [12, 174], [620, 344], [72, 354]])
        # matrix = cv2.getPerspectiveTransform(pts1,pts2)
        # frame = cv2.warpPerspective(frame, matrix, (500, 500))

        predicted = lt.predict(dt)

        lanes = ld.detect(frame)

        if predicted is not None:
            cv2.line(frame, (predicted[0][0], predicted[0][1]),
                     (predicted[0][2], predicted[0][3]), (0, 0, 255), 7)
            cv2.line(frame, (predicted[1][2], predicted[1][3]),
                     (predicted[1][0], predicted[1][1]), (0, 0, 255), 7)

            # int() so the midpoints are valid cv2 pixel coordinates
            center_coordinates_top = (
                int(abs(predicted[1][0] - predicted[0][2]) / 2 +
                    predicted[0][2]), predicted[1][1])
            center_coordinates_bottom = (
                int(abs(predicted[0][0] - predicted[1][2]) / 2 +
                    predicted[0][0]), predicted[0][1])

            cv2.line(frame, center_coordinates_top, center_coordinates_bottom,
                     (0, 255, 0), 7)
            cv2.circle(frame, (int(frame.shape[1] / 2), int(frame.shape[0])),
                       10, (255, 0, 0), 2)  # Image Center marker

            # cv2.circle(frame, center_coordinates_bottom, 20, (0, 255, 0) , 4) # Center bottom marker
            # cv2.circle(frame, center_coordinates_top, 20, (255, 0, 0) , 4) # Center top marker
            # cv2.circle(frame, (predicted[0][2], predicted[0][3]), 20, (0, 255, 0) , 4) # Left lane marker
            # cv2.circle(frame, (predicted[1][0], predicted[1][1]), 20, (0, 255, 0) , 4) # Right lane marker

            # print('Lane left:',predicted[0][2], predicted[0][3], 'Lane right:',predicted[1][0], predicted[1][1]
            #       ,'Lane center:',center_coordinates_top[0] )

            error = frame.shape[1] / 2 - (
                abs(predicted[0][0] - predicted[1][2]) / 2 + predicted[0][0])
            # rough first estimate; replaced by the cosine-rule angle below
            angle = np.degrees(
                np.arctan2(center_coordinates_top[0],
                           center_coordinates_top[1]))

            a = np.array([0, 352])  # bottom-left corner, assuming a 352-row frame
            b = np.array(
                [center_coordinates_bottom[0], center_coordinates_bottom[1]])
            c = np.array(
                [center_coordinates_top[0], center_coordinates_top[1]])

            ba = a - b
            bc = c - b

            cosine_angle = np.dot(
                ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
            angle = 90. - np.degrees(np.arccos(cosine_angle))

            print('Crosstrack Error:', error)
            cv2.putText(frame, 'Crosstrack Error:' + str('%.2f' % error),
                        (20, 70), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                        (255, 255, 255), 2)
            cv2.putText(frame, 'Angle:' + str('%.2f' % angle), (20, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)

        lt.update(lanes)

        cv2.imshow('', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Example #11
def main():
    cap = cv2.VideoCapture(0)  # 0
    cv2.namedWindow("demo")  # a new window
    ticks = 0
    s = 1
    t = 0
    lt = track.LaneTracker(2, 0.1, 200)
    ld = detect.LaneDetector(upline, lolane, Xl, Xr)
    ip_port = ('192.168.1.176', 50002)

    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(('0.0.0.0', 50000))  # bind the UDP server IP and port
    #buf, addr = sock.recvfrom(40960)

    sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock1.bind(('0.0.0.0', 50001))  # bind the UDP server IP and port
    #buf1, addr1 = sock1.recvfrom(40960)

    sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock2.bind(('0.0.0.0', 50002))  # bind the UDP server IP and port
    #buf2, addr2 = sock2.recvfrom(40960)

    dn.set_gpu(1)
    net = dn.load_net(b"cfg/yolov3.cfg", b"yolov3.weights", 0)
    meta = dn.load_meta(b"cfg/coco.data")

    datad = 0.01
    pres = 0
    ns = 0
    speed = 0
    ndis = 0

    while True:
        precTick = ticks
        ticks = cv2.getTickCount()
        dt = (ticks - precTick) / cv2.getTickFrequency()
        # print(dt)
        ret, frame = cap.read()

        # object detection using yolo

        res = dn.detect_numpy(net, meta, image=frame)
        ndis = 0
        for item in res:
            pt1 = (int(item[2][0] - item[2][2] * 0.5),
                   int(item[2][1] - item[2][3] * 0.5))
            pt2 = (int(item[2][0] + item[2][2] * 0.5),
                   int(item[2][1] + item[2][3] * 0.5))
            cv2.rectangle(frame, pt1, pt2, (0, 255, 0), 4)
            cv2.putText(frame, str(item[0], encoding="utf8"), pt1,
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
            #cv2.rectangle(frame, pt1, pt2, (0, 0, 255), 4)
            ns = item[2][3]
            ndis = max(pt1[1], pt2[1])

        speed = ns - pres
        pres = ns

        print('speed:', speed)
        print('ndis:', ndis)

        # cv2.rectangle(frame, (0,0), (599, 402), (0, 255, 0), 4)
        # frame2 = frame[upline:640, 0:480]

        if (ret):
            lanes = ld.detect(frame)
            # print(lanes)
            # if lanes:
            predicted = lt.predict(dt)

            if lanes and (t > 1):  # predict succeed
                sys.exit()
            # if s:
            #  print(ld.detectLight(frame))

            t = t + 1

            if predicted is not None and (predicted[0][1][0]
                                          == predicted[0][1][0]):
                if (t > 0):
                    t = t - 1

                xa, ya, xb, yb = predicted[0]
                x1, y1, x2, y2 = predicted[1]

                print('1: ', xa, ' ', ya)
                print('2: ', xb, ' ', yb)
                print('3: ', x1, ' ', y1)
                print('4: ', x2, ' ', y2)

                pt1 = [138, 480]
                pt2 = [265, 380]
                pt3 = [550, 480]
                pt4 = [370, 380]

                ptc = np.float32([pt1, pt2, pt3, pt4])

                ptt1 = [200, 450]
                ptt2 = [200, 300]
                ptt3 = [500, 450]
                ptt4 = [500, 300]

                pttc = np.float32([ptt1, ptt2, ptt3, ptt4])
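                # Map the road trapezoid ptc in the camera view onto the
                # rectangle pttc, giving a rough bird's-eye view.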

                matrix = cv2.getPerspectiveTransform(ptc, pttc)

                print('matrix:', matrix)

                nimg = cv2.warpPerspective(frame, matrix, (650, 500))
                if float(xb - xa) == 0:
                    kl = -slopeMax
                else:
                    kl = float(yb - ya) / float(xb - xa)
                if float(x2 - x1) == 0:
                    k1 = slopeMax
                else:
                    k1 = float(y2 - y1) / float(x2 - x1)
                X = (xb * x2 - x1 * xa) / (xb + x2 - x1 - xa)
                # print("x")
                # print(xb, X, x1)    #left m right   ->   X

                # data = X + 320
                data = (xb + x2) / 2
                # data = drstrdrv(data,)
                fYaw = 300 - 5 - data[0]
                if (abs(fYaw) < 30):
                    fYaw = fYaw * 0.2
                elif (abs(fYaw) < 60):
                    fYaw = fYaw * 0.8
                else:
                    fYaw = fYaw * 0.9
                datac = float(math.atan(fYaw / 100)) / 2

                if (datad >= 0.25):
                    datad = 0.08

                elif (abs(fYaw) > 23):

                    datad = 0.25 + (abs(fYaw) / 120.0)
                    # data = drstrdrv(data,speed)
                # data = random.random()
                if ndis > 380 and ndis < 420:
                    datad += 0.35
                print('datad::', datad)
                print(fYaw)
                # data = 0.5
                data2 = X

                sock2.sendto(struct.pack('<dd', datac, datad), ip_port)
                #sock2.sendto(struct.pack('<d', datad),  addr2)

                #time.sleep(0.01)
                # sock.sendto(data,("192.168.1.107", 9005))
                cv2.imshow('tp', nimg)
                # print('L')
                # print((xa[0], ya[0]), (xb[0], yb[0]), kl)
                # print('R')
                # print((x1[0], y1[0]), (x2[0], y2[0]), k1)
                if (kl < -slopeL):
                    # if float(xb) < leftXR and float(xb) > leftXL:
                    cv2.line(frame, (predicted[0][0], predicted[0][1]),
                             (predicted[0][2], predicted[0][3]), (0, 0, 255),
                             5)
                if (k1 > slopeR):
                    # if float(x1) < leftXR and float(x1) > leftXL:
                    cv2.line(frame, (predicted[1][0], predicted[1][1]),
                             (predicted[1][2], predicted[1][3]), (0, 0, 255),
                             5)
            lt.update(lanes)

            # print(frame.shape)
            cv2.imshow('demo', frame)

        if cv2.waitKey(1) & 0xFF == ord('s'):
            s = 1 - s
        if cv2.waitKey(1) & 0xFF == ord('q'):  # 1
            break
Example #12
import numpy as np
import cv2
from moviepy.editor import VideoFileClip
import argparse
import json

import track
import detect

# Import arguments
parser = argparse.ArgumentParser()
parser.add_argument('--videopath', type=str, required=True)
args = parser.parse_args()

lt = track.LaneTracker(2, 0.1, 500)
ld = detect.LaneDetector(180)

lines1 = {}
lines2 = {}

it = 0
ticks = 0
iti = 0


def track(frame):
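    # Note: this function shadows the `track` module imported above; that is
    # safe only because the module is not referenced again after lt is built.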
    global lt, ld, ticks, it

    precTick = ticks
    ticks = cv2.getTickCount()
    dt = (ticks - precTick) / cv2.getTickFrequency()