def video_test(input_video_path=None, output_video_path=None):
    """Interactively preview perspective-corrected + Canny-filtered video.

    Reads frames from *input_video_path* (or prompts for a path when it is
    None), warms up on the first 15 frames to estimate the vanishing/center
    point, then shows each subsequent frame after inverse perspective
    transform, Gabor filtering and Canny edge detection.  Press ESC to quit.

    NOTE(review): *output_video_path* is accepted for interface parity with
    the other variants but is not used here.
    """
    # Initial guess for the perspective center; refined below by
    # find_center_point() once enough frames are buffered.
    cx = 595
    cy = 303
    roi_width = 25
    roi_length = 90

    cap = cv2.VideoCapture(
        input_video_path \
            if input_video_path is not None \
            else input('enter video path: '))

    original_frames = []

    # Warm-up: buffer the first 15 frames for center-point estimation.
    ret, frame = cap.read()
    for _ in range(15):
        if not ret:
            # Video shorter than 15 frames — stop buffering (was: appended
            # None frames and crashed inside inv_persp_new).
            break
        original_frames.append(frame)
        img, pts1 = inv_persp_new(
            frame, (cx, cy), (roi_width, roi_length), spline_dist, 200)
        ret, frame = cap.read()

    # BUG FIX: original read `cy, cy = ...`, discarding the x coordinate
    # and leaving cx at its hard-coded guess.
    cx, cy = find_center_point(original_frames, (400, 100, 800, 719))

    while ret:
        ret, frame = cap.read()
        if not ret:
            # BUG FIX: original used `frame` (None at end of stream)
            # before checking ret, crashing on the last iteration.
            break

        img, pts1 = inv_persp_new(
            frame, (cx, cy), (roi_width, roi_length), spline_dist, 200)
        img = gabor_filter(img)

        canny = cv2.Canny(img, 50, 200)

        cv2.imshow('original image', img)
        cv2.imshow('after canny', canny)

        # ESC quits the preview loop.
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break

    cap.release()
    cv2.destroyAllWindows()
# ===== Example 2 =====
def video_test(input_video_path=None, output_video_path=None):
    """Incrementally stitch a top-down "SLAM" strip from a driving video.

    Reads frames from *input_video_path* (or prompts for a path when it is
    None), keeps a rolling window of 15 perspective-transformed frames,
    estimates per-frame vertical shift via stich_two_images(), and displays
    the growing stitched image plus alignment diagnostics.  Press ESC to
    quit; the stitched result is written to 'slam.jpg'.

    NOTE(review): *output_video_path* is unused in this variant.
    """
    # Initial guess for the perspective center; refined below by
    # find_center_point() once enough frames are buffered.
    cx = 595
    cy = 303
    roi_width = 25
    roi_length = 90

    cap = cv2.VideoCapture(
        input_video_path \
            if input_video_path is not None \
            else input('enter video path: '))

    transformed_frames = Frame_queue()
    original_frames = deque()

    # Warm-up: fill the 15-frame window used for stitching/alignment.
    ret, frame = cap.read()
    for _ in range(15):
        if not ret:
            # Video shorter than 15 frames — stop buffering (was: appended
            # None frames and crashed inside inv_persp_new).
            break
        original_frames.append(frame)

        img, pts1 = inv_persp_new(frame, (cx, cy), (roi_width, roi_length),
                                  spline_dist, transformed_img_width)
        transformed_frames.append(img)

        ret, frame = cap.read()

    # BUG FIX: original read `cy, cy = ...`, discarding the x coordinate
    # and leaving cx at its hard-coded guess.
    cx, cy = find_center_point(original_frames, (400, 100, 800, 719))

    # Seed the stitched map with a crop of a recent transformed frame;
    # coord_y tracks our vertical position inside it.
    past_img = transformed_frames[-5].frame[300:, 20:-20]
    coord_y = 1
    while ret:
        ret, frame = cap.read()
        if not ret:
            # BUG FIX: original used `frame` (None at end of stream)
            # before checking ret, crashing on the last iteration.
            break

        img, pts1 = inv_persp_new(frame, (cx, cy), (roi_width, roi_length),
                                  spline_dist, transformed_img_width)

        # Slide the rolling window: drop oldest, append newest.
        transformed_frames.popleft()
        transformed_frames.append(img)

        slam_height, slam_width = past_img.shape[:2]
        # Window of the stitched map that should correspond to the newest
        # frame's 100-pixel band (rows -300:-200 of the transformed frame).
        y_to = slam_height - coord_y - 200
        y_from = slam_height - coord_y - 300

        print('position', y_from, y_to)

        cv2.imshow('frame position',
                   transformed_frames[-1].frame[-300:-200, :])
        cv2.imshow('slam position', past_img[y_from:y_to, :])
        cv2.imshow(
            'diff',
            cv2.absdiff(transformed_frames[-1].frame[-300:-200, 20:-20],
                        past_img[y_from:y_to, :]))

        # Extend the stitched map and accumulate the estimated shift.
        past_img, shift_x, shift_y = stich_two_images(
            past_img, transformed_frames[-9].frame[300:, :],
            transformed_frames[-1].frame[300:, :])
        coord_y -= shift_y
        print('y is', coord_y)

        cv2.imshow('slam', past_img)

        # ESC quits the preview loop.
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break

    cv2.imwrite('slam.jpg', past_img)
    cap.release()
    cv2.destroyAllWindows()
# ===== Example 3 =====
def video_test(input_video_path=None, output_video_path=None):
    """Background-subtraction preview with video recording.

    Reads frames from *input_video_path* (or prompts for a path when it is
    None), keeps a rolling window of 15 blurred, perspective-transformed
    frames, computes a shift-compensated difference via back_sub.calc_diff(),
    and both displays and records a 4-panel composite
    (current | new | old | diff) to *output_video_path* (default
    'output.avi').

    Interactive keys: ESC quit, 'm'/'l' increase/decrease the per-frame
    shift estimate, 's' save a screenshot of the current transformed frame.
    """
    # Initial guess for the perspective center; refined below by
    # find_center_point() once enough frames are buffered.
    cx = 595
    cy = 303
    roi_width = 25
    roi_length = 90

    cap = cv2.VideoCapture(
        input_video_path \
            if input_video_path is not None \
            else input('enter video path: '))

    old_images = deque()
    original_frames = deque()

    # Warm-up: fill the 15-frame window used by the background subtractor.
    ret, frame = cap.read()
    for _ in range(15):
        if not ret:
            # Video shorter than 15 frames — stop buffering (was: appended
            # None frames and crashed inside inv_persp_new).
            break
        original_frames.append(frame)

        img, pts1 = inv_persp_new(frame, (cx, cy), (roi_width, roi_length),
                                  spline_dist, 200)
        # Blur suppresses pixel noise before frame differencing.
        img = cv2.blur(img, (7, 7))
        old_images.append(img)

        ret, frame = cap.read()

    # BUG FIX: original read `cy, cy = ...`, discarding the x coordinate
    # and leaving cx at its hard-coded guess.
    cx, cy = find_center_point(original_frames, (400, 100, 800, 719))

    # Output composite is four transformed-frame panels side by side.
    height, width, _ = frame.shape
    out_height, out_width, _ = img.shape
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(
        output_video_path \
            if output_video_path is not None \
            else 'output.avi',
        fourcc, 15.0, (out_width * 4, out_height))

    # Per-frame vertical shift (pixels) assumed by the subtractor;
    # tunable at runtime with 'm'/'l'.
    shift = 23

    while ret:
        ret, frame = cap.read()
        if not ret:
            # BUG FIX: original used `frame` (None at end of stream)
            # before checking ret, crashing on the last iteration.
            break

        img, pts1 = inv_persp_new(frame, (cx, cy), (roi_width, roi_length),
                                  spline_dist, 200)

        # Slide the rolling window: drop oldest, append newest (blurred).
        old_images.popleft()
        img = cv2.blur(img, (7, 7))
        old_images.append(img)

        new, old, sub_img = back_sub.calc_diff(old_images,
                                               shift_per_frame=shift)

        cv2.imshow('img', np.concatenate((img, new, old, sub_img), axis=1))

        # Project the difference image back to the original camera view
        # and overlay it on the source frame.
        dst = regress_perspecive(sub_img, pts1, (height, width))
        dst = cv2.addWeighted(frame, 0.3, dst, 0.7, 0)
        cv2.imshow('inv', dst)

        out.write(np.concatenate((img, new, old, sub_img), axis=1))

        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
        elif k == ord('m'):
            shift += 1
            print(shift)
        elif k == ord('l'):
            shift -= 1
            print(shift)
        elif k == ord('s'):
            cv2.imwrite('screen.png', img)

    cap.release()
    out.release()
    cv2.destroyAllWindows()