Example 1
import cv2
import numpy as np

# getVideoFile, getCornerPoints, homographicTransform, and virtualCube are
# project-level helpers assumed to be defined elsewhere in this repository.


def main():
    """ Main entry point of the app """
    usr_input = input(
        'Select the Video\n\t1. Tag0 \n\t2. Tag1 \n\t3. Tag2 \n\t4. multipleTags\n\nYour Choice: '
    )
    video_file = getVideoFile(int(usr_input))
    print(video_file)
    cap = cv2.VideoCapture(video_file)
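    # Corner coordinates of the canonical 200x200 tag square, clockwise from
    # the top-left.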
    Xc = np.array([[0, 0], [199, 0], [199, 199], [0, 199]])
    while cap.isOpened():
        ret, frame = cap.read()
        if frame is not None:

            frame = np.array(frame, dtype=np.uint8)
            corner_points_temp, dst_total, frame = getCornerPoints(frame)
            for tag_no in range(len(corner_points_temp)):
                corner_points = corner_points_temp[0]
                H = homographicTransform(corner_points, Xc)
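                # Draw a virtual cube on this tag's four corners using the homography.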
                virtualCube(H, frame, corner_points[4 * tag_no:4 * tag_no + 4])
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    cap.release()
    cv2.destroyAllWindows()
Example 2
import cv2
import numpy as np

# getVideoFile, getCornerPoints, homographicTransform, getTransfomredImage,
# decode, and superImpose are project-level helpers assumed to be defined
# elsewhere in this repository.


def main():
    """ Main entry point of the app """
    usr_input = input(
        'Select the Video\n\t1. Tag0 \n\t2. Tag1 \n\t3. Tag2 \n\t4. multipleTags\n\nYour Choice: '
    )
    video_file = getVideoFile(int(usr_input))
    print(video_file)
    cap = cv2.VideoCapture(video_file)
    Xc = np.array([[0, 0], [511, 0], [511, 511], [0, 511]])
    while cap.isOpened():
        ret, frame = cap.read()
        if frame is not None:

            cv2.imshow('Normal', frame)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (5, 5), 0)
            corner_points_temp, dst_total, frame = getCornerPoints(frame)
            for tag_no in range(len(corner_points_temp)):
                corner_points = corner_points_temp[0]
                H = homographicTransform(corner_points, Xc)
                h_inv = np.linalg.inv(H)
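                # Unwarp the tag to a 512x512 top-down view via the inverse
                # homography, decode its ID and orientation, then superimpose an
                # image rotated to match.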
                transformed_image = getTransfomredImage(h_inv, gray, 512)
                ID_val, rotations = decode(transformed_image)
                temp = frame
                frame = superImpose(h_inv, temp, rotations + 1)
            cv2.imshow('Superimposed', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    cap.release()
    cv2.destroyAllWindows()
Example 3
import cv2
import numpy as np

# getVideoFile, getCornerPoints, homographicTransform, getTransfomredImage,
# and decode are project-level helpers assumed to be defined elsewhere in
# this repository.


def main():
    """ Main entry point of the app """
    usr_input = input(
        'Select the Video\n\t1. Tag0 \n\t2. Tag1 \n\t3. Tag2 \n\t4. multipleTags\n\nYour Choice: ')
    video_file = getVideoFile(int(usr_input))
    print(video_file)
    cap = cv2.VideoCapture(video_file)
    font = cv2.FONT_HERSHEY_SIMPLEX
    Xc = np.array([[0, 0], [0, 199], [199, 199], [199, 0]])
    while cap.isOpened():
        ret, frame = cap.read()
        if frame is not None:

            frame = np.array(frame, dtype=np.uint8)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (5, 5), 0)
            corner_points_temp, dst_total, frame = getCornerPoints(frame)

            for tag_no in range(len(corner_points_temp)):
                corner_points = corner_points_temp[0]
                H = homographicTransform(corner_points, Xc)
                h_inv = np.linalg.inv(H)
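                # Unwarp to 200x200, decode the tag ID, and label it on the frame.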
                transformed_image = getTransfomredImage(h_inv, gray, 200)
                ID_val, rotation = decode(transformed_image)
                cv2.imshow('transformed_image', transformed_image)
                cv2.putText(frame, 'Tag ' + str(tag_no + 1) + ' value: ' + str(ID_val),
                            (10, 100 + 50 * tag_no), font, 2, (200, 255, 155), 2,
                            cv2.LINE_AA)
            frame[dst_total > 0.01 * dst_total.max()] = [0, 0, 255]
            cv2.imshow('Harris corner detector', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    cap.release()
    cv2.destroyAllWindows()
Example 4
import cv2
import numpy as np

# getVideoFile, get_undistort, colorSegmentation, homographicTransform,
# getTransfomredImage, least_squares, superImpose, and detect_turn are
# project-level helpers assumed to be defined elsewhere in this repository.


def main():
    """ Main entry point of the app """
    usr_input = input(
        'Select the Video\n\t1. challenge_video.mp4 \n\t2. project_video.mp4 \n\nYour Choice: '
    )
    video_file = getVideoFile(int(usr_input))
    print(video_file)
    cap = cv2.VideoCapture(video_file)
    font = cv2.FONT_HERSHEY_SIMPLEX
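    # Xc: destination rectangle in the bird's-eye view; Xw: the corresponding
    # source trapezoid on the road in image coordinates.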
    Xc = np.array([[900, 0], [900, 710], [250, 710], [250, 0]])
    Xw = np.array([[685, 450], [1090, 710], [220, 710], [595, 450]])
    L_coef = np.zeros(3)
    R_coef = np.zeros(3)
    l_coef_arr = list()
    r_coef_arr = list()
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    out = cv2.VideoWriter('outpy.avi',
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 25,
                          (frame_width, frame_height))
    while cap.isOpened():
        ret, frame = cap.read()
        if frame is not None:
            frame = np.array(frame, dtype=np.uint8)
            image_shape = frame.shape
            undistorted_img = get_undistort(frame)
            segmented_image = colorSegmentation(undistorted_img)
            Homography = homographicTransform(Xw, Xc)
            transformed_image = getTransfomredImage(
                np.linalg.inv(Homography[0]), segmented_image, frame.shape[1],
                frame.shape[0])
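            # Column-wise sum of the bird's-eye binary image; a peak in each
            # half marks the base position of the left and right lane line.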
            hist = np.sum(transformed_image, axis=0)
            left_lane_hist = np.argmax(hist[:len(hist) // 2])
            right_lane_hist = np.argmax(hist[len(hist) // 2:]) + len(hist) // 2
            L_coef, R_coef = least_squares(transformed_image, left_lane_hist,
                                           right_lane_hist, L_coef, R_coef)
            frame = superImpose(L_coef, R_coef, Homography[0], undistorted_img)
            turn, l_coef_arr, r_coef_arr = detect_turn(L_coef, R_coef,
                                                       image_shape, l_coef_arr,
                                                       r_coef_arr)
            cv2.putText(frame, 'Turn: ' + turn, (10, 100), font, 2,
                        (200, 255, 155), 2, cv2.LINE_AA)
            cv2.imshow('Lane Detection', frame)
            out.write(frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    cap.release()
    out.release()
    cv2.destroyAllWindows()
Example 5
import cv2
import numpy as np

# homographicTransform, sortCorners, and decode are project-level helpers
# assumed to be defined elsewhere in this repository.


def main():
    """ Main entry point of the app """
    cap = cv2.VideoCapture('Tag1.mp4')
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)
        ret, thresh = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY)
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)
        dst_total = np.zeros(gray.shape, dtype='uint8')
        hierarchy = hierarchy[0]
        corner_points = np.zeros((1, 2))
        for currentContour, currentHierarchy in zip(contours, hierarchy):
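            # Walk up the hierarchy to count how deeply this contour is nested;
            # contours nested exactly one level deep are taken as the tag outlines.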
            isSecond = 0
            idx = currentHierarchy[3]
            while idx != -1:
                isSecond = isSecond + 1
                idx = hierarchy[idx][3]
            if isSecond == 1:
                gray_fl = np.float32(gray)
                mask = np.zeros(gray_fl.shape, dtype='uint8')
                mask = cv2.GaussianBlur(mask, (3, 3), 0)
                cv2.fillPoly(mask, [currentContour], (255, 255, 255))
                dst = cv2.cornerHarris(mask, 5, 3, 0.04)
                dst = cv2.dilate(dst, None)
                ret, dst = cv2.threshold(dst, 0.1 * dst.max(), 255, 0)
                dst = np.uint8(dst)
                cv2.drawContours(frame, [currentContour], -1, (0, 255, 0), 3)
                ret, labels, stats, centroids = cv2.connectedComponentsWithStats(
                    dst)
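                # Refine the connected-component centroids to sub-pixel corner
                # locations.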
                criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,
                            100, 0.001)
                corners = cv2.cornerSubPix(gray_fl, np.float32(centroids),
                                           (5, 5), (-1, -1), criteria)
                dst_total = dst + dst_total
                corners = np.delete(corners, (0), axis=0)
                print(corners)
                if len(corners) < 4:
                    break
                corners = sortCorners(corners)
                corner_points = np.concatenate((corner_points, corners),
                                               axis=0)

        corner_points = np.delete(corner_points, (0), axis=0)
        corner_points = np.rint(corner_points).astype(int)
        print('corner_points')
        print(corner_points)
        total_tags = len(corner_points) // 4
        print('total_tags')
        print(total_tags)
        for tag_no in range(total_tags):
            H = homographicTransform(corner_points[4 * tag_no:4 * tag_no + 4])
            transformed_image = np.zeros((200, 200), dtype='uint8')
            h_inv = np.linalg.inv(H)
            for row in range(0, 200):
                for col in range(0, 200):
                    X_dash = np.array([col, row, 1]).T
                    X = np.matmul(h_inv, X_dash)
                    X = (X / X[2])
                    X = X.astype(int)
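                    # Note: indices here are (col, row), so the sampled patch
                    # is written transposed relative to the source orientation.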
                    transformed_image[col][row] = gray[X[1]][X[0]]
            ID_val = decode(transformed_image)
            print(ID_val)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        frame_modi = frame  # alias, not a copy; frame is not reused afterwards
        frame_modi[dst_total > 0.01 * dst_total.max()] = [0, 0, 255]
        cv2.imshow('Harris corner detector', frame_modi)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
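
Note: the per-pixel sampling loop in this example can be replaced by a single OpenCV call. A minimal sketch, assuming H maps the detected tag corners to the 200x200 canonical square as in the example:

# Equivalent of the manual inverse-homography loop: warpPerspective applies H
# directly with interpolation, producing the 200x200 top-down view of the tag
# (up to the transpose the manual loop performs when writing pixels).
transformed_image = cv2.warpPerspective(gray, H, (200, 200))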
Example 6
import cv2
import numpy as np

# getVideoFile, get_undistort, colorSegmentation, homographicTransform,
# getTransfomredImage, least_squares, superImpose, and detect_turn are
# project-level helpers assumed to be defined elsewhere in this repository.


def main():
    """ Main entry point of the app """
    usr_input = input(
        'Select the Video\n\t1. challenge_video.mp4 \n\t2. project_video.mp4 \n\nYour Choice: '
    )
    video_file = getVideoFile(int(usr_input))
    print(video_file)
    cap = cv2.VideoCapture(video_file)
    font = cv2.FONT_HERSHEY_SIMPLEX
    Xc = np.array([[900, 0], [900, 710], [250, 710], [250, 0]])
    Xw = np.array([[685, 450], [1090, 710], [220, 710], [595, 450]])
    L_coef = np.zeros(3)
    R_coef = np.zeros(3)
    l_coef_arr = list()
    r_coef_arr = list()
    while cap.isOpened():
        ret, frame = cap.read()
        if frame is not None:
            frame = np.array(frame, dtype=np.uint8)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (5, 5), 0)
            image_shape = gray.shape

            undistorted_img = get_undistort(frame)
            segmented_image, img_output = colorSegmentation(undistorted_img)

            Homography = homographicTransform(Xw, Xc)
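            # homographicTransform returns a tuple here; element 0 is the 3x3
            # matrix mapping road points (Xw) to bird's-eye points (Xc).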
            transformed_image = getTransfomredImage(
                np.linalg.inv(Homography[0]), segmented_image, frame.shape[1],
                frame.shape[0])
            hist = np.sum(transformed_image, axis=0)
            cv2.imshow('transformed_image', transformed_image)
            left_lane_hist = np.argmax(hist[:len(hist) // 2])
            right_lane_hist = np.argmax(hist[len(hist) // 2:]) + len(hist) // 2
            lefty, leftx, righty, rightx, L_coef, R_coef = least_squares(
                transformed_image, left_lane_hist, right_lane_hist, L_coef,
                R_coef)
            frame = superImpose(L_coef, R_coef, Homography[0], undistorted_img)
            turn, l_coef_arr, r_coef_arr = detect_turn(L_coef, R_coef,
                                                       image_shape, l_coef_arr,
                                                       r_coef_arr)
            cv2.putText(frame, 'Turn: ' + turn, (10, 100), font, 2,
                        (200, 255, 155), 2, cv2.LINE_AA)
            cv2.imshow('Lane Detection', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    cap.release()
    cv2.destroyAllWindows()