def video_test(input_video_path=None):
    cx = 603
    cy = 297
    roi_width = 25
    roi_length = 90

    px_height_of_roi_length = 352
    # alternatively:
    # int(spline_dist.get_rails_px_height_by_distance(roi_length))

    cap = cv2.VideoCapture(
        input_video_path
        if input_video_path is not None
        else input('enter video path: '))

    ret, frame = cap.read()

    while ret:
        ret, frame = cap.read()
        if not ret:
            break

        transformed_plane, pts1, M = inv_persp_new(
            frame, (cx, cy), (roi_width, roi_length),
            px_height_of_roi_length, 200)

        extra_transformed_plane, pts1, M = inv_persp_new(
            frame, (cx, cy), (roi_width, roi_length),
            px_height_of_roi_length, 200,
            extra_width=200 * 2)

        cv2.imshow(
            'plane of the way',
            transformed_plane)

        cv2.imshow(
            'plane',
            extra_transformed_plane)

        cv2.imshow(
            'original frame',
            frame)

        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
        elif k == ord('s'):
            cv2.imwrite('screen.png', extra_transformed_plane)

    cap.release()
    cv2.destroyAllWindows()
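Note: inv_persp_new is defined elsewhere in this repository and never shown in these listings. A minimal sketch of what an equivalent inverse-perspective (bird's-eye) helper could look like, assuming it returns the warped plane, the source quadrilateral and the transform matrix; the trapezoid half-widths are illustrative guesses, not values from the original code:

import cv2
import numpy as np

def inv_persp_sketch(frame, center, roi_size, px_height, out_width):
    # Hypothetical stand-in for inv_persp_new: warp a ground-plane trapezoid
    # in front of the camera into a top-down view.
    cx, _cy = center  # cy is unused in this simplified geometry
    roi_width, roi_length = roi_size
    h = frame.shape[0]
    near_half, far_half = 200, 40  # assumed pixel half-widths of the ROI
    pts1 = np.float32([
        [cx - far_half, h - px_height], [cx + far_half, h - px_height],
        [cx - near_half, h - 1], [cx + near_half, h - 1]])
    out_height = int(out_width * roi_length / roi_width)
    pts2 = np.float32([[0, 0], [out_width, 0],
                       [0, out_height], [out_width, out_height]])
    M = cv2.getPerspectiveTransform(pts1, pts2)
    plane = cv2.warpPerspective(frame, M, (out_width, out_height))
    return plane, pts1, M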
Example 2
def video_test(input_video_path=None, output_video_path=None):
    cx = 595
    cy = 303
    roi_width = 25
    roi_length = 90

    cap = cv2.VideoCapture(
        input_video_path
        if input_video_path is not None
        else input('enter video path: '))

    original_frames = []

    ret, frame = cap.read()
    for i in range(15):
        original_frames.append(frame)
        img, pts1 = inv_persp_new(
            frame, (cx, cy), (roi_width, roi_length), spline_dist, 200)
        ret, frame = cap.read()

    cx, cy = find_center_point(original_frames, (400, 100, 800, 719))

    while ret:
        ret, frame = cap.read()
        if not ret:
            break

        img, pts1 = inv_persp_new(
            frame, (cx, cy), (roi_width, roi_length), spline_dist, 200)
        img = gabor_filter(img)

        canny = cv2.Canny(img, 50, 200)

        cv2.imshow('filtered image', img)
        cv2.imshow('after canny', canny)

        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break

    cap.release()
    cv2.destroyAllWindows()
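gabor_filter is also defined elsewhere. A minimal sketch of a comparable Gabor pre-filter, assuming a single kernel tuned for vertical rail-like texture; every tuning value here is an assumption:

import cv2
import numpy as np

def gabor_filter_sketch(img):
    # Hypothetical stand-in for gabor_filter: emphasise oriented texture
    # with one Gabor kernel, normalised to keep overall brightness stable.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if img.ndim == 3 else img
    kernel = cv2.getGaborKernel((21, 21), sigma=4.0, theta=0,
                                lambd=10.0, gamma=0.5, psi=0)
    kernel /= kernel.sum() + 1e-6
    return cv2.filter2D(gray, cv2.CV_8U, kernel)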
Example 3
def video_test(input_video_path=None, output_video_path=None):
    cx = 595
    cy = 303
    roi_width = 25
    roi_length = 90

    cap = cv2.VideoCapture(
        input_video_path
        if input_video_path is not None
        else input('enter video path: '))

    transformed_frames = Frame_queue()
    original_frames = deque()

    ret, frame = cap.read()
    for i in range(15):
        original_frames.append(frame)

        img, pts1 = inv_persp_new(frame, (cx, cy), (roi_width, roi_length),
                                  spline_dist, transformed_img_width)
        transformed_frames.append(img)

        ret, frame = cap.read()

    cx, cy = find_center_point(original_frames, (400, 100, 800, 719))

    past_img = transformed_frames[-5].frame[300:, 20:-20]
    coord_y = 1
    while ret:
        ret, frame = cap.read()
        if not ret:
            break

        img, pts1 = inv_persp_new(frame, (cx, cy), (roi_width, roi_length),
                                  spline_dist, transformed_img_width)
        #img_gray, old_img_gray, match, absdiff = tm.find_obstacles(transformed_frames, (0, 50, 200, 400), method='sparse')

        transformed_frames.popleft()
        transformed_frames.append(img)
        # transformed_frames.append(gabor_filter(img))

        slam_height, slam_width = past_img.shape[:2]
        y_to = slam_height - coord_y - 200
        y_from = slam_height - coord_y - 300

        print('position', y_from, y_to)

        #        cv2.imshow('current', transformed_frames[-1].frame)
        cv2.imshow('frame position',
                   transformed_frames[-1].frame[-300:-200, :])
        cv2.imshow('slam position', past_img[y_from:y_to, :])
        cv2.imshow(
            'diff',
            cv2.absdiff(transformed_frames[-1].frame[-300:-200, 20:-20],
                        past_img[y_from:y_to, :]))

        past_img, shift_x, shift_y = stich_two_images(
            past_img, transformed_frames[-9].frame[300:, :],
            transformed_frames[-1].frame[300:, :])
        coord_y -= shift_y
        print('y is', coord_y)

        cv2.imshow('slam', past_img)

        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break

    cv2.imwrite('slam.jpg', past_img)
    cap.release()
    cv2.destroyAllWindows()
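stich_two_images is not shown either. A minimal sketch of the same idea: estimate the frame-to-frame translation with cv2.phaseCorrelate and prepend the newly exposed rows to the mosaic. The sign convention and the 20 px side crop are assumptions based on how past_img is built above:

import cv2
import numpy as np

def stitch_sketch(mosaic, prev_strip, cur_strip):
    # Hypothetical stand-in for stich_two_images.
    a = cv2.cvtColor(prev_strip, cv2.COLOR_BGR2GRAY).astype(np.float32)
    b = cv2.cvtColor(cur_strip, cv2.COLOR_BGR2GRAY).astype(np.float32)
    (shift_x, shift_y), _response = cv2.phaseCorrelate(a, b)
    shift_x, shift_y = int(round(shift_x)), int(round(shift_y))
    if shift_y < 0:
        n = -shift_y  # rows of newly exposed ground at the top of the view
        mosaic = np.vstack([cur_strip[:n, 20:-20], mosaic])
    return mosaic, shift_x, shift_y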
Example 4
def video_test(input_video_path=None, output_video_path=None):
    cx = 595
    cy = 303
    roi_width = 25
    roi_length = 90

    cap = cv2.VideoCapture(
        input_video_path
        if input_video_path is not None
        else input('enter video path: '))

    old_images = deque()
    original_frames = deque()

    ret, frame = cap.read()
    for i in range(15):
        original_frames.append(frame)

        img, pts1 = inv_persp_new(frame, (cx, cy), (roi_width, roi_length),
                                  spline_dist, 200)
        img = cv2.blur(img, (7, 7))
        old_images.append(img)

        ret, frame = cap.read()

    cx, cy = find_center_point(original_frames, (400, 100, 800, 719))

    height, width, _ = frame.shape
    out_height, out_width, _ = img.shape
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(
        output_video_path
        if output_video_path is not None
        else 'output.avi',
        fourcc, 15.0, (out_width * 4, out_height))

    frame_number = 0
    shift = 23

    while ret:
        ret, frame = cap.read()
        if not ret:
            break

        img, pts1 = inv_persp_new(frame, (cx, cy), (roi_width, roi_length),
                                  spline_dist, 200)

        old_images.popleft()
        img = cv2.blur(img, (7, 7))
        old_images.append(img)

        new, old, sub_img = back_sub.calc_diff(old_images,
                                               shift_per_frame=shift)

        cv2.imshow('img', np.concatenate((img, new, old, sub_img), axis=1))

        dst = regress_perspecive(sub_img, pts1, (height, width))
        dst = cv2.addWeighted(frame, 0.3, dst, 0.7, 0)
        cv2.imshow('inv', dst)

        out.write(np.concatenate((img, new, old, sub_img), axis=1))

        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
        elif k == ord('m'):
            shift += 1
            print(shift)
        elif k == ord('l'):
            shift -= 1
            print(shift)
        elif k == ord('s'):
            cv2.imwrite('screen.png', img)

    cap.release()
    out.release()
    cv2.destroyAllWindows()
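back_sub.calc_diff is not shown. A minimal sketch of the shift-compensated differencing it appears to perform, assuming shift_per_frame counts pixels of forward motion per frame in the top-down view and that the comparison lag is fixed:

import cv2
import numpy as np

def calc_diff_sketch(images, shift_per_frame=23, lag=10):
    # Hypothetical stand-in for back_sub.calc_diff: roll an older top-down
    # frame by the accumulated ego-motion, then difference it with the
    # newest one. Static ground cancels out; anything standing above the
    # plane leaves residue.
    new = images[-1]
    old = np.roll(images[-lag], lag * shift_per_frame, axis=0)
    sub = cv2.absdiff(new, old)
    return new, old, sub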
Example 5
def video_test(input_video_path=None, output_video_path=None):
    cx = 595
    cy = 303
    roi_width = 25
    roi_length = 90

    cap = cv2.VideoCapture(
        input_video_path
        if input_video_path is not None
        else input('enter video path: '))

    transformed_frames = Frame_queue()
    original_frames = deque()

    ret, frame = cap.read()
    for i in range(15):
        original_frames.append(frame)

        img, pts1 = inv_persp_new(frame, (cx, cy), (roi_width, roi_length),
                                  spline_dist, 200)
        transformed_frames.append(img)

        ret, frame = cap.read()

    obstacles_map, obstacles_on_frame = tm.detect_obstacles(
        transformed_frames, roi=(0, 250, 200, 550), pre_filter=gabor_filter)

    height, width, _ = frame.shape
    out_height, out_width = obstacles_map.shape[:2]

    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(
        output_video_path
        if output_video_path is not None
        else 'output.avi',
        fourcc, 15.0, (out_width, out_height))

    while ret:
        ret, frame = cap.read()
        if not ret:
            break

        img, pts1 = inv_persp_new(frame, (cx, cy), (roi_width, roi_length),
                                  spline_dist, 200)

        transformed_frames.popleft()
        transformed_frames.append(img)

        obstacles_map, obstacles_on_frame = tm.detect_obstacles(
            transformed_frames,
            roi=(0, 250, 200, 550),
            pre_filter=gabor_filter)

        cv2.imshow('obstacles', obstacles_map)
        cv2.imshow('obstacles on frame', obstacles_on_frame)
        cv2.imshow('original', transformed_frames[-1].frame)

        out.write(obstacles_on_frame)

        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
        elif k == ord('s'):
            cv2.imwrite('screen.jpg', img)

    cap.release()
    out.release()
    cv2.destroyAllWindows()
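tm.detect_obstacles is not shown, and the real Frame_queue entries expose a .frame attribute. A minimal sketch of the same idea over plain BGR frames, assuming roi is (x0, y0, x1, y1) and an arbitrary diff threshold:

import cv2
import numpy as np

def detect_obstacles_sketch(frames, roi, pre_filter=None):
    # Hypothetical stand-in for tm.detect_obstacles: difference the newest
    # frame against an older one inside the ROI and paint changed pixels
    # onto a copy of the newest frame.
    x0, y0, x1, y1 = roi
    cur, old = frames[-1], frames[-9]
    if pre_filter is not None:
        cur, old = pre_filter(cur), pre_filter(old)
    diff = cv2.absdiff(cur[y0:y1, x0:x1], old[y0:y1, x0:x1])
    if diff.ndim == 3:
        diff = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(diff, 40, 255, cv2.THRESH_BINARY)  # assumed value
    on_frame = frames[-1].copy()
    on_frame[y0:y1, x0:x1][mask > 0] = (0, 0, 255)  # mark changes in red
    return mask, on_frame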
Example 6
def video_test(output_video_path=None):
    cx = 595 - 300
    cy = 303 - 200

    roi_width = 6
    roi_length = 20
    roi_height = 4.4
    px_height_of_roi_length = 352

    # video output block
    json_frames = decode_stdin()
    frame = next(json_frames)

    out_height, out_width, _ = frame[200:, 300:-300].shape
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(
        output_video_path
        if output_video_path is not None
        else 'output.avi',
        fourcc, 15.0, (out_width, out_height))

    # initialize old_frames
    old_frame = frame[200:, 300:-300]

    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)

    mask = np.zeros_like(old_frame)

    old_img = cv2.add(old_frame, mask)
    old_transformed_frame, pts1, M = inv_persp_new(old_img, (cx, cy),
                                                   (roi_width, roi_length),
                                                   px_height_of_roi_length,
                                                   400)

    # orb initialize
    orb = cv2.ORB_create(nfeatures=5500, edgeThreshold=10)

    # main loop (TODO: fps output)
    trackers = []

    for frame_number, frame in enumerate(json_frames):
        key = handle_keyboard(screenshot_image=None)
        if key == 1:
            break
        elif key == ord('n'):
            trackers = []
            continue

        frame = frame[200:, 300:-300]

        #frame = cv2.pyrUp(cv2.pyrDown(frame))
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        transformed_frame, pts1, M = inv_persp_new(frame, (cx, cy),
                                                   (roi_width, roi_length),
                                                   px_height_of_roi_length,
                                                   400)

        kp = make_np_array_from_points(orb.detectAndCompute(old_gray, None)[0])

        new_kp, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, kp,
                                                   None)

        masks, drawed_contours, obstacles_blocks_list = calculate_obstacles_map(
            mask, new_kp[st == 1], kp[st == 1], M, (cx, cy))

        img = frame.copy()
        detected_obstacles = sum_maps_equal(
            obstacles_blocks_list,
            [1] * len(obstacles_blocks_list))
        obstacles = cv2.bitwise_and(frame,
                                    frame,
                                    mask=cv2.inRange(
                                        cv2.pyrUp(detected_obstacles),
                                        (0, 0, 1), (0, 0, 255)))

        cv2.imshow('tracked_obstacles',
                   cv2.addWeighted(frame, 0.3, obstacles, 0.7, 0))

        trackers = update_tracking_rectangle(trackers, frame, old_frame)
        detected_obstacles, trackers = track_obstacles(detected_obstacles,
                                                       trackers)

        print('all trackers:', len(trackers))
        print([i[4] for i in trackers])

        cv2.imshow(
            'detected obstacles',
            cv2.addWeighted(frame, 0.4, cv2.pyrUp(detected_obstacles), 0.6, 0))

        out.write(
            cv2.addWeighted(frame, 0.4, cv2.pyrUp(detected_obstacles), 0.6, 0))

        old_frame = frame.copy()
        old_gray = frame_gray.copy()
        old_img = img.copy()
        old_transformed_frame = transformed_frame.copy()
        mask = np.zeros_like(old_frame)

    out.release()
    cv2.destroyAllWindows()
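make_np_array_from_points is not shown; cv2.calcOpticalFlowPyrLK expects its input points as an (N, 1, 2) float32 array, so the helper is presumably a small conversion along these lines:

import numpy as np

def make_np_array_from_points_sketch(keypoints):
    # Convert cv2.KeyPoint objects into the array layout that
    # cv2.calcOpticalFlowPyrLK accepts as prevPts.
    return np.float32([kp.pt for kp in keypoints]).reshape(-1, 1, 2)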
Example 7
def video_test(input_video_path=None):
    cx = 603
    cy = 297
    roi_width = 25
    roi_length = 90

    px_height_of_roi_length = 352
    # alternatively:
    # int(spline_dist.get_rails_px_height_by_distance(roi_length))

    cap = cv2.VideoCapture(
        input_video_path
        if input_video_path is not None
        else input('enter video path: '))

    ret, frame = cap.read()

    while ret:
        ret, frame = cap.read()
        if not ret:
            break

        transformed_plane, pts1, M = inv_persp_new(frame, (cx, cy),
                                                   (roi_width, roi_length),
                                                   px_height_of_roi_length,
                                                   500)

        extra_transformed_plane, pts1, M = inv_persp_new(
            frame, (cx, cy), (roi_width, roi_length),
            px_height_of_roi_length,
            200,
            extra_width=200 * 2)

        gray = cv2.cvtColor(extra_transformed_plane, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 50, 150, apertureSize=3)
        lines = cv2.HoughLinesP(edges,
                                1,
                                np.pi / 180,
                                100,
                                minLineLength=100,
                                maxLineGap=10)

        if lines is not None:
            for line in lines:
                x1, y1, x2, y2 = line[0]
                cv2.line(extra_transformed_plane, (x1, y1), (x2, y2),
                         (0, 255, 0), 2)

        regressed_image = regress_perspecive(extra_transformed_plane, pts1,
                                             frame.shape[:2], 400)

        cv2.imshow('frame', cv2.addWeighted(regressed_image, 0.5, frame, 0.5,
                                            0))

        cv2.imshow('plane', extra_transformed_plane)

        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
        elif k == ord('s'):
            cv2.imwrite('screen.png', extra_transformed_plane)

    cap.release()
    cv2.destroyAllWindows()
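regress_perspecive (sic) is not shown. A minimal sketch of the inverse warp it appears to perform, assuming pts1 is the source quadrilateral returned by inv_persp_new (top-left, top-right, bottom-left, bottom-right) and that the last argument is the total extra width that inv_persp_new added to the plane:

import cv2
import numpy as np

def regress_perspective_sketch(plane, pts1, frame_shape, extra_width=0):
    # Hypothetical stand-in for regress_perspecive: map the top-down plane
    # back into original-frame coordinates so it can be blended with the
    # camera frame.
    frame_h, frame_w = frame_shape
    plane_h, plane_w = plane.shape[:2]
    half = extra_width // 2
    pts2 = np.float32([
        [half, 0], [plane_w - half, 0],
        [half, plane_h], [plane_w - half, plane_h]])
    M_inv = cv2.getPerspectiveTransform(pts2, pts1)
    return cv2.warpPerspective(plane, M_inv, (frame_w, frame_h))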
Example 8
def video_test(input_video_path=None, output_video_path=None):
    cx = 595
    cy = 303
    roi_width = 25
    roi_length = 90

    cap = cv2.VideoCapture(
        input_video_path
        if input_video_path is not None
        else input('enter video path: '))

    old_images = deque()
    original_frames = deque()

    ret, frame = cap.read()
    for i in range(15):
        original_frames.append(frame)

        img, pts1 = inv_persp_new(
            frame, (cx, cy), (roi_width, roi_length), spline_dist, 200)
        old_images.append(img)

        ret, frame = cap.read()

    height, width, _ = frame.shape
    out_height, out_width, _ = img.shape
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(
        output_video_path
        if output_video_path is not None
        else 'output.avi',
        fourcc, 15.0, (out_width * 4, out_height))

    # sample stereo pair; both are immediately overwritten inside the loop
    left = cv2.imread('aloeL.jpg')
    right = cv2.imread('aloeR.jpg')
    while ret:
        ret, frame = cap.read()
        if not ret:
            break
        original_frames.popleft()
        original_frames.append(frame)

        img, pts1 = inv_persp_new(
            frame, (cx, cy), (roi_width, roi_length), spline_dist, 200)
        old_images.popleft()
        old_images.append(img)

        left = original_frames[-5][:, width // 2:]
        right = original_frames[-1][:, width // 2:]

        left = cv2.pyrDown(left)
        left = cv2.blur(left, (3, 3))
        right = cv2.pyrDown(right)
        right = cv2.blur(right, (3, 3))

        depth = calculate_depth_map(left, right)
        cv2.imshow('left', left)
        cv2.imshow('right', right)
        cv2.imshow('depth', depth)
        depth = cv2.cvtColor(depth, cv2.COLOR_GRAY2BGR)
        res = cv2.addWeighted(left, 0.5, depth, 0.5, 0)
        cv2.imshow('res', res)

#        left = old_images[-1][300:,:]
#        right = old_images[-9][300:,:]
#
#        shift_value = find_shift_value(left, right, (30, 100, 60, 300))
#        right = np.roll(right, shift_value[1], axis=0)#shift_value[0])
#        right = np.roll(right, shift_value[0], axis=1)#shift_value[0])
#        left = left[100:-100,:]
#        right = right[100:-100,:]
#
#        print(shift_value)
#
#        left = np.rot90(left, 3)
#        right = np.rot90(right, 3)
#
#        cv2.imshow('left', left)
#        cv2.imshow('right', right)
#
#        shifted_map = cv2.equalizeHist(
#            calculate_depth_map(
#                left, right))
#        cv2.imshow(
#            'shifted map', shifted_map)
#        diff = cv2.absdiff(left, right)
#        cv2.imshow('diff', diff)

#        dm = calculate_depth_map(left, right)
#        cv2.imshow('dm', dm)
#        dm = cv2.equalizeHist(dm)
#        cv2.imshow('eq dm', dm)

#        dm = cv2.cvtColor(dm, cv2.COLOR_GRAY2BGR)

        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
        elif k == ord('s'):
            cv2.imwrite('screen.png', img)

    cap.release()
    out.release()
    cv2.destroyAllWindows()
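calculate_depth_map is not shown; note that 'left' and 'right' here are temporally separated frames from one camera, so this is motion stereo rather than a calibrated rig. A minimal block-matching sketch with assumed disparity settings:

import cv2
import numpy as np

def calculate_depth_map_sketch(left, right):
    # Hypothetical stand-in for calculate_depth_map: block-matching
    # disparity, rescaled into a displayable 8-bit map.
    gl = cv2.cvtColor(left, cv2.COLOR_BGR2GRAY)
    gr = cv2.cvtColor(right, cv2.COLOR_BGR2GRAY)
    stereo = cv2.StereoBM_create(numDisparities=64, blockSize=15)
    disp = stereo.compute(gl, gr)  # CV_16S with 4 fractional bits
    disp = cv2.normalize(disp, None, 0, 255, cv2.NORM_MINMAX)
    return disp.astype(np.uint8)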
Example 9
def video_test(input_video_path=None, output_video_path=None):
    cx = 603
    cy = 297
    roi_width = 25
    roi_length = 90

    px_height_of_roi_length = 352
    # alternatively:
    # int(spline_dist.get_rails_px_height_by_distance(roi_length))

    cap = cv2.VideoCapture(
        input_video_path
        if input_video_path is not None
        else input('enter video path: '))

    ret, frame = cap.read()

    out_height, out_width = frame.shape[:2]
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(
        output_video_path
        if output_video_path is not None
        else 'texture_matching.avi',
        fourcc, 15.0, (out_width, out_height))

    while ret:
        ret, frame = cap.read()
        if not ret:
            break

        transformed_plane, pts1, M = inv_persp_new(
            frame, (cx, cy), (roi_width, roi_length),
            px_height_of_roi_length, 200)

        extra_transformed_plane, pts1, M = inv_persp_new(
            frame, (cx, cy), (roi_width, roi_length),
            px_height_of_roi_length, 200,
            extra_width=200 * 2)

        gray = cv2.cvtColor(transformed_plane, cv2.COLOR_BGR2GRAY)
        sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=5)
        sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=5)
        for y in range(gray.shape[0]):
            for x in range(gray.shape[1]):
                gray[y, x] = cv2.fastAtan2(sobely[y, x], sobelx[y, x]) * 255 // 360
        s = v = np.ones_like(gray, dtype=np.uint8) * 255
        hsv = cv2.merge([gray, s, v])
        hsv = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

        height = hsv.shape[0]
        w, h = 20, 20
        #hsv = cv2.blur(hsv, (2, 2))
        template = hsv[height - h - 1: height - 1, 90:90 + w]
        res = cv2.matchTemplate(hsv, template, cv2.TM_SQDIFF_NORMED)
        threshold = 0.3
        loc = np.where(res <= 1 - threshold)
        for pt in zip(*loc[::-1]):
            cv2.rectangle(hsv, pt, (pt[0] + w, pt[1] + h), (0, 0, 0),
                          thickness=cv2.FILLED)
        cv2.imshow('res', res)
#        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

        hsv = cv2.rectangle(
            hsv,
            (90, hsv.shape[0] - 21), (110, hsv.shape[0] - 1),
            (0, 0, 0), 2)

        regressed_image = regress_perspecive(
            extra_transformed_plane, pts1, frame.shape[:2], 400)
        regressed_texture = regress_perspecive(
            hsv, pts1, frame.shape[:2], 0)

        frame = cv2.addWeighted(
            regressed_texture, 0.5,
            frame, 0.5,
            0)

        cv2.imshow('frame', frame)
        cv2.imshow('plane', hsv)
        out.write(frame)

        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
        elif k == ord('s'):
            cv2.imwrite('screen2.png', regressed_image)

    cap.release()
    out.release()
    cv2.destroyAllWindows()
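The per-pixel fastAtan2 loop in Example 9 can be vectorised; a sketch producing the same 0-255 orientation encoding with cv2.phase:

import cv2
import numpy as np

def orientation_map_sketch(gray):
    # Vectorised equivalent of the fastAtan2 loop above: encode gradient
    # direction (0..360 degrees) into a 0..255 single-channel map.
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=5)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=5)
    angle = cv2.phase(sobelx, sobely, angleInDegrees=True)
    return (angle * 255 // 360).astype(np.uint8)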