def detect_lines(self, img):
    # Run the LSD detector at three Gaussian blur scales and merge the
    # results, dropping exact duplicate segments.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray1 = cv2.GaussianBlur(gray, (3, 3), 5)
    gray2 = cv2.GaussianBlur(gray, (5, 5), 5)
    gray3 = cv2.GaussianBlur(gray, (15, 15), 3)
    linesL1 = lsd(gray1)
    linesL2 = lsd(gray2)
    linesL3 = lsd(gray3)
    linesL = np.vstack([linesL1, linesL2, linesL3])
    linesL = np.unique(linesL, axis=0)
    return linesL
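Note that np.unique only drops exact duplicates, and segments found at different blur scales rarely coincide pixel-for-pixel. A minimal follow-up sketch (not part of the snippet above) that collapses near-identical detections by rounding endpoints to a tolerance grid:

import numpy as np

def dedup_segments(segments, tol=1.0):
    # segments is assumed to be the (N, 5) array returned by lsd():
    # [x1, y1, x2, y2, width]. Round endpoints to a tol-sized grid and keep
    # one representative per rounded key.
    key = np.round(segments[:, :4] / tol).astype(int)
    _, idx = np.unique(key, axis=0, return_index=True)
    return segments[np.sort(idx)]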
Example #2
    def detect_lines(self, arg):
        gray = cv2.cvtColor(arg, cv2.COLOR_RGB2GRAY)
        lines = lsd(gray, ang_th=60, sigma_scale=3.0)
        if DEBUG:
            plt.imshow(arg)
            for x1, y1, x2, y2, _ in lines:
                plt.gca().add_artist(
                    Line2D((x1, x2), (y1, y2), linewidth=2,
                           linestyle='dashed'))
            dshow('line segments')
        imgHeight, imgWidth, _ = arg.shape
        y1max = self.parameter['marginTop'] * imgHeight
        y2min = self.parameter['marginBottom'] * imgHeight
        x1max = self.parameter['marginLeft'] * imgWidth
        x2min = self.parameter['marginRight'] * imgWidth
        hlines = []
        vlines = []
        for x1, y1, x2, y2, w in lines:
            dx = abs(x1 - x2)
            dy = abs(y1 - y2)
            # consider line segments near margins and not too short
            if dx > 15 and dy / dx < 0.15 and (y1 < y1max or y2 > y2min):
                hlines.append([x1, y1, x2, y2, w])
            if dy > 15 and dx / dy < 0.15 and (x1 < x1max or x2 > x2min):
                vlines.append([x1, y1, x2, y2, w])

        return imgHeight, imgWidth, hlines, vlines
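The dy/dx < 0.15 test above is an angle threshold in disguise: atan(0.15) is roughly 8.5 degrees. An equivalent angle-based check, shown as a standalone sketch rather than part of the original class:

import numpy as np

def is_near_horizontal(x1, y1, x2, y2, max_angle_deg=8.5):
    # atan(0.15) ~= 8.5 deg, so dy/dx < 0.15 keeps segments within roughly
    # 8.5 degrees of the horizontal axis.
    angle = abs(np.degrees(np.arctan2(y2 - y1, x2 - x1)))
    return min(angle, 180.0 - angle) <= max_angle_deg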
def get_hor_and_ver_merged_lines(image_path,line_type):
    micro_soft_ocr = ocrbox.result_microsoft_api(image_path)
    img = cv2.imread(image_path, cv2.IMREAD_COLOR)
    gray_clr_obj = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    lines_arr = lsd(gray_clr_obj)
    word_bounding_boxes = get_word_bounding_boxes(micro_soft_ocr)
    text_bounding_boxes = get_text_bounding_boxes(micro_soft_ocr)

    if line_type != "tilted" : 
        line_coord_list=convert_lines_df_to_list(lines_arr)
    else:
        line_coord_list=convert_lines_df_to_list1(lines_arr)

    filter_line_coord_list = remove_lines_inside_text_box(line_coord_list,word_bounding_boxes)
    horizontal_values,vertical_values,tilted_lines = getting_horizontal_vertical_lines_values_from_lsd(filter_line_coord_list)   

    vertical_grouping_list,ver_grouping_lines = group_vertical_lines(vertical_values)
    final_table_vcoordinates = get_vertical_merging_lines(vertical_grouping_list)
    horizontal_grouping_list = group_horizantal_lines(horizontal_values)
    final_table_hcoordinates = get_horizantal_merging_lines(horizontal_grouping_list)

    return final_table_vcoordinates, final_table_hcoordinates,tilted_lines

# image_path = "/Users/prasanthpotnuru/Downloads/3_table_2/005.png"
# get_hor_and_ver_merged_lines(image_path)
Example #4
def extract_segment(path):
    img = Image.open(path).transpose(Image.FLIP_TOP_BOTTOM)
    # img = img.resize((1160, 1636), Image.NEAREST)
    gray = np.asarray(img.convert('L'))
    threshold = 96
    thresholdedData = (gray > threshold) * 255
    thresholdedData = morphology_test(thresholdedData)
    thresholdedData = removeSmallObject(thresholdedData, minSize=1000)
    lines = lsd(thresholdedData)
    fig, ax = plt.subplots()  # figsize=(15, 15))
    # ax.imshow(img, interpolation='nearest', cmap=plt.cm.gray)
    # thick_lines = get_thick_lines(lines)
    thick_lines = filter_thick_lines(lines, thresholdedData, 2)
    # thick_lines = lines
    for i in range(thick_lines.shape[0]):
        p0, p1 = (int(thick_lines[i, 0]),
                  int(thick_lines[i, 1])), (int(thick_lines[i, 2]),
                                            int(thick_lines[i, 3]))
        width = thick_lines[i, 4]
        ax.plot((p0[0], p1[0]), (p0[1], p1[1]), linewidth=width / 2)
    ax.axis('image')
    ax.set_xticks([])
    ax.set_yticks([])
    plt.show()
    csv_path = path.split('.')[0] + '.csv'
    write_csv(csv_path, np.ndarray.tolist(thick_lines))
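The helpers morphology_test and removeSmallObject are not shown in this snippet. A minimal sketch of what removeSmallObject presumably does, assuming scikit-image is available (an illustration, not the original helper):

import numpy as np
from skimage import morphology

def removeSmallObject(binary_img, minSize=1000):
    # Drop connected components smaller than minSize pixels and return a
    # 0/255 uint8 mask, matching how the result is used above.
    mask = morphology.remove_small_objects(np.asarray(binary_img) > 0, min_size=minSize)
    return mask.astype(np.uint8) * 255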
Example #5
    def detect_lines(self, arg):
        Hline = []
        Vline = []
        img = cv2.imread(arg, cv2.IMREAD_COLOR)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        imgHeight, imgWidth = gray.shape
        lines = lsd(gray)

        for i in range(lines.shape[0]):
            pt1 = (int(lines[i, 0]), int(lines[i, 1]))
            pt2 = (int(lines[i, 2]), int(lines[i, 3]))
            # consider only lines whose length exceeds this arbitrary value
            if (abs(pt1[0] - pt2[0]) > 45) and (
                (int(pt1[1]) < imgHeight * 0.25) or
                (int(pt1[1]) > imgHeight * 0.75)):
                # make full horizontal line
                Hline.append([0, int(pt1[1]), imgWidth, int(pt2[1])])
            if (abs(pt1[1] - pt2[1]) > 45) and (
                (int(pt1[0]) < imgWidth * 0.4) or
                (int(pt1[0]) > imgWidth * 0.6)):
                # make full vertical line
                Vline.append([int(pt1[0]), 0, int(pt2[0]), imgHeight])
        Hline.sort(key=lambda x: (x[1]), reverse=False)
        Vline.sort(key=lambda x: (x[0]), reverse=False)
        return img, imgHeight, imgWidth, Hline, Vline
    def process_segments(self):
        """Run the line segment detector."""
        if self._img is None:
            raise ValueError('image must be set before running the line segment detector')

        img_gray = np.array(self._img_scaled.convert('L'))

        lsd_result = lsd(img_gray, scale=self._lsd_scale)

        # sort by distance and keep the 9 longest segments
        # add a column to store distance
        rows, cols = lsd_result.shape
        segments_d = np.empty((rows, cols + 1))
        segments_d[:, :-1] = lsd_result

        # find distance of each line segment
        for i in range(segments_d.shape[0]):
            x1, y1, x2, y2, *_ = segments_d[i]
            segments_d[i, 5] = np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)  # distance formula

        # sort and remove distance column
        lsd_sorted = segments_d[segments_d[:, 5].argsort()[::-1]][:, :-1]

        # keep only the 9 longest segments
        self.segments = lsd_sorted[:9]

        self._segments_processed = True
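The per-row distance loop above can also be written without the explicit loop. A vectorized sketch (not part of the class's API) that keeps the k longest segments:

import numpy as np

def longest_segments(lsd_result, k=9):
    # np.hypot computes every segment's Euclidean length at once.
    lengths = np.hypot(lsd_result[:, 2] - lsd_result[:, 0],
                       lsd_result[:, 3] - lsd_result[:, 1])
    order = np.argsort(lengths)[::-1]
    return lsd_result[order[:k]]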
def detect_lsd_lines(image):

    image = image.astype('float64')
    if np.max(image) <= 1:
        image *= 255

    width = image.shape[1]
    height = image.shape[0]
    scale_w = np.maximum(width, height)
    scale_h = scale_w

    lsd_lines = lsd(image)

    lsd_lines[:, 0] -= width/2.0
    lsd_lines[:, 1] -= height/2.0
    lsd_lines[:, 2] -= width/2.0
    lsd_lines[:, 3] -= height/2.0
    lsd_lines[:, 0] /= (scale_w/2.0)
    lsd_lines[:, 1] /= (scale_h/2.0)
    lsd_lines[:, 2] /= (scale_w/2.0)
    lsd_lines[:, 3] /= (scale_h/2.0)
    lsd_lines[:, 1] *= -1
    lsd_lines[:, 3] *= -1

    return {'segments': lsd_lines[:, 0:4], 'nfa': lsd_lines[:, 0:4]}
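For reference, a sketch of the inverse mapping, assuming the same conventions as above (origin at the image centre, scaling by half the longer side, y axis flipped):

import numpy as np

def unnormalize_segments(norm_segments, width, height):
    # Undo the y-flip, rescale by half the longer image side, and shift the
    # origin back to the top-left corner.
    s = max(width, height) / 2.0
    seg = norm_segments.copy()
    seg[:, 1] *= -1
    seg[:, 3] *= -1
    seg[:, [0, 2]] = seg[:, [0, 2]] * s + width / 2.0
    seg[:, [1, 3]] = seg[:, [1, 3]] * s + height / 2.0
    return seg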
Example #8
def detect_line_by_lsd(img00):
    # Convert the image to grayscale
    gray = cv2.cvtColor(img00, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 5)

    # detect
    linesL = lsd(gray)

    return linesL
Example #9
    def get_corners(edged_img):
        lines = lsd(edged_img)
        # lines is list of list with value of each list item is
        # [point1.x, point1.y, point2.x, point2.y, width]

        corners = []
        if lines is not None:
            horizontal_lines_canvas = np.zeros(edged_img.shape, dtype=np.uint8)
            vertical_lines_canvas = np.zeros(edged_img.shape, dtype=np.uint8)
            for line in lines:
                x1, y1, x2, y2, _ = line
                x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
                if abs(x2 - x1) > abs(y2 - y1):
                    (x1, y1), (x2, y2) = sorted(((x1, y1), (x2, y2)), key=lambda pt: pt[0])
                    cv2.line(horizontal_lines_canvas, (max(x1 - 5, 0), y1), (min(x2 + 5, edged_img.shape[1] - 1), y2),
                             255, 2)
                else:
                    (x1, y1), (x2, y2) = sorted(((x1, y1), (x2, y2)), key=lambda pt: pt[1])
                    cv2.line(vertical_lines_canvas, (x1, max(y1 - 5, 0)), (x2, min(y2 + 5, edged_img.shape[0] - 1)),
                             255, 2)

            lines = []
            # find the horizontal lines (connected-components -> bounding boxes -> final lines)
            (contours, hierarchy) = cv2.findContours(horizontal_lines_canvas, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
            contours = sorted(contours, key=lambda c: cv2.arcLength(c, True), reverse=True)[:5]

            horizontal_lines_canvas = np.zeros(edged_img.shape, dtype=np.uint8)
            for contour in contours:
                contour = contour.reshape((contour.shape[0], contour.shape[2]))
                min_x = np.min(contour[:, 0]) + 2
                max_x = np.max(contour[:, 0]) - 2
                left_y = int(np.average(contour[contour[:, 0] == min_x][:, 1]))
                right_y = int(np.average(contour[contour[:, 0] == max_x][:, 1]))
                lines.append((min_x, left_y, max_x, right_y))
                cv2.line(horizontal_lines_canvas, (min_x, left_y), (max_x, right_y), 255, 1)
                corners.append((min_x, left_y))
                corners.append((max_x, right_y))

            # find the vertical lines (connected-components -> bounding boxes -> final lines)
            (contours, hierarchy) = cv2.findContours(vertical_lines_canvas, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
            contours = sorted(contours, key=lambda c: cv2.arcLength(c, True), reverse=True)[:5]

            vertical_lines_canvas = np.zeros(edged_img.shape, dtype=np.uint8)
            for contour in contours:
                contour = contour.reshape((contour.shape[0], contour.shape[2]))
                min_y = np.amin(contour[:, 1]) + 2
                max_y = np.amax(contour[:, 1]) - 2
                top_x = int(np.average(contour[contour[:, 1] == min_y][:, 0]))
                bottom_x = int(np.average(contour[contour[:, 1] == max_y][:, 0]))
                lines.append((top_x, min_y, bottom_x, max_y))
                cv2.line(vertical_lines_canvas, (top_x, min_y), (bottom_x, max_y), 255, 1)
                corners.append((top_x, min_y))
                corners.append((bottom_x, max_y))
            if corners:
                corners = filter_corner(corners)      # calling from helpers file
        return corners
Example #10
def return_distribution_length(img):
    lines = lsd(img)
    length_segments = np.zeros(lines.shape[0])
    for i in range(lines.shape[0]):
        length_segments[i] = length_segment(lines[i])
    length_segments = np.array(length_segments + 1, dtype=int)
    hist, bin_edges = np.histogram(length_segments,
                                   bins=int(length_segments.max()) - 1,
                                   range=(1, length_segments.max()))
    return hist, bin_edges
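The length_segment helper is not shown above; it presumably returns the Euclidean length of a segment. A hypothetical sketch consistent with how it is used:

import numpy as np

def length_segment(line):
    # line is a pylsd row [x1, y1, x2, y2, width]; return the endpoint distance.
    x1, y1, x2, y2 = line[:4]
    return np.hypot(x2 - x1, y2 - y1)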
Example #11
    def drawLineSegments(self, visualize=False):
        lines = lsd(self.image)
        lines = self.removeFromBoxOfDoom(lines)
        lines = list(
            filter(lambda line: self.lineThresh(line, self.thresholdLine),
                   lines))
        # lineFilter
        if visualize:
            drawLines(lines)
        return lines
Example #12
def findLSDCand(im):
    """
    Use the LSD algorithm to extract candidate line segments.

    Args:
        im: input image with shape [h, w, 3]
    Returns:
        cand: a list of all candidate lines in endpoint format
              [x1, y1, x2, y2, width]; width is not used in this project
    """
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    points = lsd(gray)
    return points
Example #13
def lsd_alg(color_image,
            line_width=0,
            fuse=False,
            dTheta=2 / 360 * np.pi * 2,
            dRho=2,
            maxL=4):
    """
    LSD algoritm for line segment detection
    :param color_image: [np.array] The input image in BGR mode
            line_width: override line width during drawing result
           fuse:   [bool] Whether to fuse together the segments detected
           dTheta: [float] The max difference in theta between two segments to be fused together
           dRho:   [float] The max difference in rho between two segments to be fused together
           maxL:   [int] The maximal number of lines fused together. Set to 0 for no limit.
    :return:    lines: [np.array] (nb_lines, 4) each element of the array corresponds to [pt1_x, pt1_y, pt2_x, pt2_y] in DOUBLES
		        result_lines: [np.array] BGR images with red lines representing LSD result
                result_points: [np.array] BGR images with red dots representing LSD result
    """

    gray_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
    # Black BGR images with the same height/width as the grayscale image
    result_lines = np.zeros(gray_image.shape + (3,))
    result_points = np.zeros(gray_image.shape + (3,))
    lines = lsd(gray_image)  # Python binding calling the C++ .so library

    # Fuse lines if asked
    if fuse:
        lines = lines.reshape((lines.shape[0], 1, lines.shape[1]))
        lines = sd.fuseCloseSegment(lines, dTheta, dRho, maxL)
        lines = lines.reshape((lines.shape[0], lines.shape[2]))

    for i in range(lines.shape[0]):
        pt1 = (int(lines[i, 0]), int(lines[i, 1]))
        pt2 = (int(lines[i, 2]), int(lines[i, 3]))
        if line_width == 0:
            width = lines[i, 4]
        else:
            width = line_width * 2
        cv2.line(result_lines, pt1, pt2, (255, 255, 255), int(np.ceil(width / 2)))

        # Some coordinates returned can be out of bounds
        if 0 <= pt1[0] < gray_image.shape[1] and 0 <= pt1[1] < gray_image.shape[0]:
            result_points[pt1[1]][pt1[0]][2] = 255  # red pixel at each line endpoint
        if 0 <= pt2[0] < gray_image.shape[1] and 0 <= pt2[1] < gray_image.shape[0]:
            result_points[pt2[1]][pt2[0]][2] = 255  # red pixel at each line endpoint

    # Lines over a black background
    return lines[:, :4], result_lines, result_points
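The fuse step delegates to an external sd.fuseCloseSegment helper; its dTheta/dRho thresholds refer to a (rho, theta) line parameterization. A minimal sketch of that parameterization for one segment (an illustration, not the helper itself):

import numpy as np

def segment_rho_theta(x1, y1, x2, y2):
    # Hough-style parameters of the supporting line: theta is the direction of
    # the line's normal, rho its distance from the origin along that normal,
    # so rho = x*cos(theta) + y*sin(theta) for every point on the line.
    theta = np.arctan2(y2 - y1, x2 - x1) + np.pi / 2.0
    rho = x1 * np.cos(theta) + y1 * np.sin(theta)
    return rho, theta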
def LineSegmentedDetector(image, gray):
    resultL = image
    #image_copy = image.copy()
    linesL = lsd(gray)
    #print("lines_lsd:", len(linesL))
    for line in linesL:
        x1, y1, x2, y2 = map(int, line[:4])
        resultL = cv2.line(image, (x1, y1), (x2, y2), (0, 0, 255), 1)
        #if (x2-x1)**2 + (y2-y1)**2 > 1000:
        # Draw a red line
        #    cv2.line(resultL, (x1,y1), (x2,y2), (0,0,255), 3)
    return resultL
Example #15
def _get_horizon_lines(bgr_img):
    """
    Given BGR image,
    Returns list of green lines (x1,y1,x2,y2,w).
    """
    # extract green color into green_mask
    # https://stackoverflow.com/questions/47483951/how-to-define-a-threshold-value-to-detect-only-green-colour-objects-in-an-image
    img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2HLS)
    lower = np.uint8([40, 40, 40])
    upper = np.uint8([70, 255, 255])
    green_mask = cv2.inRange(img, lower, upper)

    return lsd(green_mask)
Example #16
def detect_line(file_path):
    file_path_without_ext, ext = os.path.splitext(file_path)
    src = cv2.imread(file_path, cv2.IMREAD_COLOR)
    gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    lines = lsd(gray)
    for i in range(lines.shape[0]):
        pt1 = (int(lines[i, 0]), int(lines[i, 1]))
        pt2 = (int(lines[i, 2]), int(lines[i, 3]))
        dist = distance.euclidean(pt1, pt2)
        ic(dist, pt1, pt2)
        if dist < 15:
            continue
        width = lines[i, 4]
        cv2.line(src, pt1, pt2, (0, 0, 255), int(np.ceil(width / 2)))
    out_file_path = file_path_without_ext + '_line.jpg'
    ic(out_file_path)
    cv2.imwrite(out_file_path, src)
Example #17
    def lineArt(self, image):
        fullName = image
        src = cv.imread(image, cv.IMREAD_COLOR)
        gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
        lines = lsd(gray)
        img = np.zeros([1080, 1200, 3], dtype=np.uint8)
        img.fill(255)
        os.mkdir('./lineart')
        for i in range(lines.shape[0]):
            pt1 = (int(lines[i, 0]), int(lines[i, 1]))
            pt2 = (int(lines[i, 2]), int(lines[i, 3]))
            width = lines[i, 4]
            cv.line(img, pt1, pt2, (0, 0, 0), int(np.ceil(width / 2)))
            if i % 10 == 0 or i == lines.shape[0] - 1:
                cv.imshow('Step', img)
                cv.waitKey(0)
                cv.destroyAllWindows()
                cv.imwrite('./gallery/new' + str(i) + '.jpg', img)
Example #18
def constant_width(img, tau=20, thres=.5, compute_param=False):
    """

    :param img:
    :param tau:
    :param thres:
    :param compute_param:
    :return:
    """
    lines = lsd(img)
    if compute_param:
        thres_values = np.array([.1 * n for n in range(10)])
        NFA_values = np.zeros(10)
        for i in range(10):
            NFA_values[i] = return_NFA(img, tau, thres_values[i])
        best_index = np.argmin((NFA_values - 1)**2)
        threshold = thres_values[best_index]
        print("threshold = ", threshold)
    else:
        threshold = thres

    # test parallel segments
    print("\n****   begin parallel test   *****")
    index_segments = compute_parallel_segments(lines, tau=tau)

    # test gradient
    print("\n****   begin gradient test   *****")
    index_segments = return_opposed_gradient(lines, index_segments)

    # test aligned segments
    print("\n****   begin aligned test   *****")
    index_segments = return_aligned_segments(lines,
                                             index_segments,
                                             threshold=threshold)

    # test proximity
    print("\n****   begin proximity test   *****")
    index_segments = return_proximity_segments(lines, index_segments)

    print(f"\n****   NFA= {return_NFA(img, tau, threshold)}   *****")

    return index_segments
Example #19
def line_detection(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    lines = lsd(gray)
    p_lines = []
    image_one = np.ones(np.shape(image)) * 255

    for i in range(lines.shape[0]):
        pt1 = (int(lines[i, 0]), int(lines[i, 1]))
        pt2 = (int(lines[i, 2]), int(lines[i, 3]))
        # Euclidean length of the segment
        width = np.sqrt((lines[i, 0] - lines[i, 2])**2 +
                        (lines[i, 1] - lines[i, 3])**2)
        # slope ratio |dx| / |dy|: values below 1 mean the segment is closer to vertical
        angle = abs(lines[i, 0] - lines[i, 2]) / abs(lines[i, 1] - lines[i, 3])

        # keep segments longer than 10 px that are more vertical than horizontal
        if width > 10 and (angle < 1):
            p_lines.append(
                [lines[i, 0], lines[i, 1], lines[i, 2], lines[i, 3]])
            cv2.line(image_one, pt1, pt2, (0, 0, 255), 2)

    cv2.imwrite('test_lines.jpg', image_one)
    return p_lines
Example #20
    def frame_processing(self,  frame):
        """
        lsd line extraction
        """
        use_lines = []
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        lines = lsd(gray)  # call into the pylsd package here
#        print(lines)
        try:
            for i in range(lines.shape[0]):
                pt1 = (int(lines[i, 0]), int(lines[i, 1]))
                pt2 = (int(lines[i, 2]), int(lines[i, 3]))
                width = lines[i, 4]

                if( (eucldist_vectorized(pt1, pt2) < self.cam_LENTHRESHOLD) | (width < self.cam_WIDTHTHRESHOLD) ):
                    continue 
                use_lines.append(lines[i])
                final_frame = cv2.line(frame, pt1, pt2, (0, 0, 255), int(np.ceil(width / 2)))
            return final_frame,  use_lines   
        except :
            return frame,  lines
Example #21
def line_detect(src):
    '''
    detect lines in a given image

    Parameters:

    src - the input image (BGR)

    Return:

    properties of detected lines
    '''

    gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    lines = lsd.lsd(gray)
    # drop the last column
    lines = np.delete(lines, -1, 1)
    '''
    (lines[i, 0], lines[i, 1]): starting point of the line
    (lines[i, 2], lines[i, 3]): ending point of the line
    '''
    return lines
def detect_vps(frame_gray,
               prms,
               frame_to_draw=None,
               points_staright_old=[],
               points_twisted_old=[]):
    lines = lsd.lsd(np.array(frame_gray, np.float32))
    lines = lines[:, 0:4]
    for i in range(lines.shape[0]):
        pt1 = (int(lines[i, 0]), int(lines[i, 1]))
        pt2 = (int(lines[i, 2]), int(lines[i, 3]))
        if frame_to_draw is not None:
            cv.line(frame_to_draw, pt1, pt2, (0, 0, 255), 1)
    denoised_lanes = denoise_lanes(lines, prms)

    if (len(denoised_lanes) > 0):
        points_staright, points_twisted = convert_to_PClines(
            denoised_lanes, prms)
    else:
        points_staright, points_twisted = [], []

    detections_straight, m1, b1 = find_detections(points_staright, prms)
    detections_twisted, m2, b2 = find_detections(points_twisted, prms)

    if (len(detections_straight) == 0 and len(detections_twisted) == 0):
        return []

    # gather initial vanishing point detections
    mvp_all, NFAs = read_detections_as_vps(detections_straight, m1, b1,
                                           detections_twisted, m2, b2, prms)

    # refine detections; this returns a 2 x N array, where N is the number of VPs left after refining
    mvp_all = refine_detections(mvp_all, lines, prms)
    mvp_all, NFAs = remove_dublicates(mvp_all, NFAs, prms)
    for i in range(len(mvp_all[0])):
        p1 = np.int32(mvp_all[:, i])
        cv.circle(frame_to_draw, tuple(p1), 5, (0, 255, 0), 3)

    return mvp_all.T  # return as an N x 2 array, where N is the number of VPs
Example #23
def estimateFocalLength(image):
    from pylsd.lsd import lsd

    height = image.shape[0]
    width = image.shape[1]

    lines = lsd(image.mean(2))

    lineImage = image.copy()
    for line in lines:
        cv2.line(lineImage, (int(line[0]), int(line[1])),
                 (int(line[2]), int(line[3])), (0, 0, 255),
                 int(np.ceil(line[4] / 2)))
        continue
    #cv2.imwrite('test/lines.png', lineImage)

    numVPs = 3
    VPs, VPLines, remainingLines = calcVanishingPoints(lines, numVPs=numVPs)
    #focalLength = (np.sqrt(np.linalg.norm(np.cross(VPs[0], VPs[1]))) + np.sqrt(np.linalg.norm(np.cross(VPs[0], VPs[2]))) + np.sqrt(np.linalg.norm(np.cross(VPs[1], VPs[2])))) / 3
    focalLength = (np.sqrt(np.abs(np.dot(VPs[0], VPs[1]))) +
                   np.sqrt(np.abs(np.dot(VPs[0], VPs[2]))) +
                   np.sqrt(np.abs(np.dot(VPs[1], VPs[2])))) / 3
    return focalLength
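The averaged square roots above come from the orthogonality constraint between vanishing points. A sketch of the underlying relation for one pair, assuming the vanishing points are 2-D and expressed relative to the image centre:

import numpy as np

def focal_from_vp_pair(vp1, vp2):
    # For a pinhole camera with the principal point at the image centre, two
    # vanishing points of orthogonal directions satisfy vp1 . vp2 + f**2 = 0,
    # hence f = sqrt(-vp1 . vp2); abs() tolerates noisy estimates as above.
    return float(np.sqrt(np.abs(np.dot(np.asarray(vp1, dtype=float),
                                       np.asarray(vp2, dtype=float)))))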
Example #24
def drawLineSegments(file):
    global image
    rows, cols = image.shape
    # gray = np.asarray(image.convert('L'))
    # gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
    lines = lsd(image)
    # draw = ImageDraw.Draw(image)
    if lines is not None:
        for line in lines:
            # print(line)
            pt1 = [int(line[0]), int(line[1])]
            pt2 = [int(line[2]), int(line[3])]
            # width = lines[i, 4]
            dx = int(line[2]) - int(line[0])
            dy = int(line[3]) - int(line[1])
            length = np.sqrt(dx * dx + dy * dy)
            if length > 50:
                print(length)
                cv2.line(image, tuple(pt1), tuple(pt2), (0, 0, 255), 4)

    cv2.imshow("img", image)
Example #25
def LineDetect(image, thLength):
    if image.shape[2] == 1:
        grayImage = image
    else:
        grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    imageLSD = np.copy(grayImage)

    # line segments, [pt1[0], pt1[1], pt2[0], pt2[1], width]
    linesLSD = lsd(imageLSD)
    del imageLSD

    # keep line segments whose length is greater than thLength
    lineSegs = []
    for line in linesLSD:
        x1 = line[0]
        y1 = line[1]
        x2 = line[2]
        y2 = line[3]
        length = np.sqrt((x1 - x2)**2 + (y1 - y2)**2)
        if length > thLength:
            lineSegs.append([x1, y1, x2, y2])

    return lineSegs
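A minimal usage sketch calling the LineDetect function above; the file name and length threshold are illustrative only:

import cv2

image = cv2.imread('document.jpg', cv2.IMREAD_COLOR)  # illustrative input file
long_segments = LineDetect(image, thLength=100)       # keep segments longer than 100 px
print(len(long_segments), 'segments longer than 100 px')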
Example #26
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date    : 2015-12-19 02:09:53
# @Author  : Gefu Tang ([email protected])
# @Link    : https://github.com/primetang/pylsd
# @Version : 0.0.1

import cv2
import numpy as np
import os
from pylsd.lsd import lsd
fullName = 'car.jpg'
folder, imgName = os.path.split(fullName)
src = cv2.imread(fullName, cv2.IMREAD_COLOR)
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
lines = lsd(gray)
for i in range(lines.shape[0]):
    pt1 = (int(lines[i, 0]), int(lines[i, 1]))
    pt2 = (int(lines[i, 2]), int(lines[i, 3]))
    width = lines[i, 4]
    cv2.line(src, pt1, pt2, (0, 0, 255), int(np.ceil(width / 2)))
cv2.imwrite(os.path.join(folder, 'cv2_' + imgName.split('.')[0] + '.jpg'), src)
Example #27
def V(im, width, height, focal, params, tmp_count):

    # fix random seed
    np.random.seed(1)

    # principal point is assumed at image center
    u0 = width / 2
    v0 = height / 2

    # line segment (LS) extraction based on the LSD algorithm
    # Reference https://github.com/primetang/pylsd

    gray = np.asarray(im.convert('L'))
    lines = lsd(gray, tmp_count)
    lsd_output = lines[:, :4]

    # import scipy.io as sio
    # mat_lsd = sio.loadmat('lsd.mat')
    # lsd_output = mat_lsd['lsd']

    # plot the results
    if plots.lsd:
        im1 = im.copy()
        cmap = plt.cm.hsv(np.linspace(0, 1, lsd_output.shape[0]))[:, :3]
        draw = ImageDraw.Draw(im1)
        for j in range(lsd_output.shape[0]):
            pt1 = (lsd_output[j, 0], lsd_output[j, 1])
            pt2 = (lsd_output[j, 2], lsd_output[j, 3])
            draw.line((pt1, pt2),
                      fill=tuple((cmap[j] * 255).astype(int)),
                      width=2)
        #im1.show()
        im1.save('tmp/tmp1.jpg')

    # LS filtering

    thres_aligned = max(width, height) / 128.
    length_t = np.sqrt(width + height) / 1.71
    ls = ls_filter(thres_aligned, length_t, lsd_output)
    ls_homo = normalize.normalize(ls, width, height, focal)

    # plot the results
    if plots.ls_filter:
        im2 = im.copy()
        cmap = plt.cm.hsv(np.linspace(0, 1, ls.shape[0]))[:, :3]
        draw = ImageDraw.Draw(im2)
        for j in range(ls.shape[0]):
            pt1 = (ls[j, 0], ls[j, 1])
            pt2 = (ls[j, 2], ls[j, 3])
            draw.line((pt1, pt2),
                      fill=tuple((cmap[j] * 255).astype(int)),
                      width=4)
        #im2.show()
        im2.save('tmp/tmp2.jpg')

    # ZL and zenith rough predictions

    # prediction of the zenith line
    dist_max = width / 8
    zl = zl_predict(lsd_output, dist_max, u0, v0, width, height, params)
    zl_homo = []
    z_homo_cand = []
    z_group_cand = []
    for i in range(len(zl)):
        zl_homo.append(
            normalize.normalize(np.array([[zl[i], 0, u0, v0]]), width, height,
                                focal))
        [tmp_z_homo_cand,
         tmp_z_group_cand] = z_predict(ls_homo, zl_homo[i], params, 0)
        z_homo_cand.append(tmp_z_homo_cand)
        z_group_cand.append(tmp_z_group_cand)

    # plot the results
    if plots.zl:
        im3 = im.copy()
        cmap = plt.cm.hsv(np.linspace(0, 1, len(z_homo_cand)))[:, :3]
        draw = ImageDraw.Draw(im3)
        for j in range(len(z_homo_cand)):
            # tmp = np.array([-0.0578, -0.9965, 0.0597])
            # z = unnormalize.unnormalize(tmp, width, height, focal, 0)
            z = unnormalize.unnormalize(z_homo_cand[j], width, height, focal,
                                        0)
            pt1 = (width / 2, height / 2)
            pt2 = (z[0], z[1])
            draw.line((pt1, pt2),
                      fill=tuple((cmap[j] * 255).astype(int)),
                      width=4)
        # im3.show()
        im3.save('tmp/tmp3.jpg')

    # choose the best zenith candidate based on the relevance of the predicted HLs

    best_z_cand = 0
    best_z_score = 0
    for i in range(len(zl_homo)):

        # HL prediction
        [modes_homo, _, _, _, _] = hl_predict(lsd_output, z_homo_cand[i], u0,
                                              v0, width, height, focal, params)

        # HL scoring (for performance optimization, each zenith candidate is assessed based only on the meaningful
        # HLs (no sampling is performed at that step))

        [_, results] = hl_score(modes_homo, ls_homo, z_homo_cand[i], params)

        # keep the zenith candidate with highest score
        if results["score"] > best_z_score:
            best_z_cand = i
            best_z_score = results["score"]

    # zenith refinement (based on Zhang et al. method)
    [z_homo_cand[best_z_cand],
     z_group_cand[best_z_cand]] = z_predict(ls_homo, zl_homo[best_z_cand],
                                            params, 1)

    # HL prediction
    [modes_homo, modes_offset, modes_left, modes_right,
     H] = hl_predict(lsd_output, z_homo_cand[best_z_cand], u0, v0, width,
                     height, focal, params)

    # HL sampling
    [samp_homo, samp_left,
     samp_right] = hl_sample(z_homo_cand[best_z_cand], modes_homo,
                             modes_offset, modes_left, modes_right, H, u0, v0,
                             width, height, focal, params)

    # plot the results
    if plots.hl_samples:
        if plots.zl:
            im4 = im3.copy()
        else:
            im4 = im.copy()
        cmap = plt.cm.hsv(np.linspace(0, 1, len(zl_homo)))[:, :3]
        draw = ImageDraw.Draw(im4)
        for j in range(samp_homo.shape[1]):

            pt1 = (0, samp_left[j])
            pt2 = (width, samp_right[j])
            draw.line((pt1, pt2),
                      fill=tuple((cmap[i] * 255).astype(int)),
                      width=1)
        # im4.show()
        im4.save('tmp/tmp4.jpg')

    if plots.hl_modes:
        if plots.hl_samples:
            im5 = im4.copy()
        else:
            im5 = im.copy()
        draw = ImageDraw.Draw(im5)
        for j in range(modes_homo.shape[1]):
            if H[j] > 0:
                pt1 = (0, modes_left[j])
                pt2 = (width, modes_right[j])
                draw.line((pt1, pt2), fill=tuple([0, 0, 255]), width=4)
        # im5.show()
        im5.save('tmp/tmp5.jpg')

    # HL scoring

    # import scipy.io as sio
    # tmp_samp_homo = sio.loadmat('samp_homo.mat')
    # samp_homo = tmp_samp_homo['samp_homo']

    [hl_homo, results] = hl_score(samp_homo, ls_homo, z_homo_cand[best_z_cand],
                                  params)

    # import scipy.io as sio
    # tmp_hl_homo = sio.loadmat('hl_homo.mat')
    # hl_homo = tmp_hl_homo['hl_homo']
    # hl_homo = np.squeeze(hl_homo)

    hl = unnormalize.unnormalize(hl_homo, width, height, focal, 1)
    hvps = unnormalize.unnormalize(results["hvp_homo"], width, height, focal,
                                   0)
    hvp_groups = results["hvp_groups"]
    z = unnormalize.unnormalize(z_homo_cand[best_z_cand], width, height, focal,
                                0)
    z_group = z_group_cand[best_z_cand]

    if params.return_z_homo:
        return hl, hvps, hvp_groups, z, z_group, ls, z_homo_cand[
            best_z_cand], results["hvp_homo"], ls_homo

    #print(0)
    return hl, hvps, hvp_groups, z, z_group, ls
Example #28
img00 = imgcol.copy()
#img00 = cv2.resize(img00,(int(img00.shape[1]/5),int(img00.shape[0]/5)))
gray = cv2.cvtColor(img00, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 5)

t1 = time.time()
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
linesH = cv2.HoughLinesP(edges,
                         rho=1,
                         theta=np.pi / 360,
                         threshold=50,
                         minLineLength=50,
                         maxLineGap=10)
t2 = time.time()

linesL = lsd(gray)
t3 = time.time()

img2 = img00.copy()
for line in linesH:
    x1, y1, x2, y2 = line[0]

    # Draw a red line
    img2 = cv2.line(img2, (x1, y1), (x2, y2), (0, 0, 255), 3)

cv2.imwrite('samp_hagh.jpg', img2)
img3 = img00.copy()
img4 = img00.copy()
for line in linesL:
    x1, y1, x2, y2 = map(int, line[:4])
    img3 = cv2.line(img3, (x1, y1), (x2, y2), (0, 0, 255), 3)
Example #29
import cv2
from pylsd.lsd import lsd
import numpy as np

img = cv2.imread('./../sample_image.jpg')
W = 200
w_ratio = 200 / img.shape[1]
img = cv2.resize(img, (200, int(w_ratio * img.shape[0])))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray1 = cv2.GaussianBlur(gray, (3, 3), 5)
gray2 = cv2.GaussianBlur(gray, (5, 5), 5)
gray3 = cv2.GaussianBlur(gray, (15, 15), 3)
#gray1 = cv2.bitwise_not(gray1)
linesL1 = lsd(gray1)
linesL2 = lsd(gray2)
linesL3 = lsd(gray3)

linesL = np.vstack([linesL1, linesL2])
linesL = np.vstack([linesL, linesL3])
#linesL.extend(linesL3)

for line in linesL:
    x1, y1, x2, y2 = map(int, line[:4])
    dx = x2 - x1
    dy = y2 - y1
    if (dx)**2 + (dy)**2 > 5000:
        #img = cv2.line(img, (x1,y1), (x2,y2), (0,0,255), 3)
        img = cv2.line(img, (x1 - (dx * 200), y1 - (dy * 200)),
                       (x1 + (dx * 200), y1 + (dy * 200)), (0, 255, 0), 2)

cv2.namedWindow('window')
cv2.imshow('window', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
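The loop above extends each segment far beyond its endpoints and relies on cv2.line to clip at the image border. A small sketch of the same idea that clips explicitly with cv2.clipLine (a standalone helper, not part of the script above):

import cv2

def draw_extended_line(img, x1, y1, x2, y2, color=(0, 255, 0), thickness=2, factor=200):
    # Extend the segment by +/- factor * (dx, dy) and clip the endpoints to the
    # image rectangle before drawing.
    dx, dy = x2 - x1, y2 - y1
    p1 = (int(x1 - dx * factor), int(y1 - dy * factor))
    p2 = (int(x1 + dx * factor), int(y1 + dy * factor))
    inside, p1, p2 = cv2.clipLine((0, 0, img.shape[1], img.shape[0]), p1, p2)
    if inside:
        cv2.line(img, p1, p2, color, thickness)
    return img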
Example #30
def classical(img):
    img = cv2.resize(img, (224, 224))  # For Resnet18
    img1 = img.copy()
    img = cv2.flip(img, 1)

    v_x = int(img.shape[1] / 2)
    v_y = int(img.shape[0] / 2)

    #Calculate lines using the LSD algorithm
    # v_x = int(v_x)
    # v_y = int(v_y)

    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # BGR to grayscale
    # Remove salt-and-pepper noise with a (3, 3) median filter
    img_gray = cv2.medianBlur(img_gray, 3)
    # Smooth the image with a (9, 9) Gaussian filter
    img_gray = cv2.GaussianBlur(img_gray, (9, 9), 0)

    lines = lsd(img_gray)
    # Selecting required lines from all possible lines.
    # cv2.line(img,(x3,y3),(x4,y4),(0,0,255),w/2)

    sel_lines, thetas, mags, w = process(lines, img, v_x, v_y)

    sel_lines = np.array(sel_lines)

    print("Number of lines selected are {0}".format(sel_lines.shape[0]))

    if sel_lines.shape[0] == 0:
        sys.exit("No lines found")

    theta_min = sel_lines[:, 5].min()
    theta_max = sel_lines[:, 5].max()

    mag_max = sel_lines[:, 6].max()
    mag_min = sel_lines[:, 6].min()

    tmin_ind = np.where(sel_lines == theta_min)[0][0]
    tmax_ind = np.where(sel_lines == theta_max)[0][0]
    mmin_ind = np.where(sel_lines == mag_min)[0][0]
    mmax_ind = np.where(sel_lines == mag_max)[0][0]

    L1 = line([sel_lines[tmax_ind, 0], sel_lines[tmax_ind, 1]],
              [sel_lines[tmax_ind, 2], sel_lines[tmax_ind, 3]])

    L2 = line([sel_lines[tmin_ind, 0], sel_lines[tmin_ind, 1]],
              [sel_lines[tmin_ind, 2], sel_lines[tmin_ind, 3]])

    R = intersection(L1, L2)

    if R:
        v_x, v_y = R
    else:
        sys.exit("No intersection point detected")

    innerangle = angle(sel_lines[tmax_ind], sel_lines[tmin_ind])

    mvp = np.tan(innerangle)
    mvpdeg = (innerangle * 180 / math.pi)

    # print 'mvpdeg is {0}'.format(innerangle*180/math.pi)

    # print 'Polar line equation value (distance) at the vanishing point is {0}'.format(pm)

    cvp = v_y - mvp * (v_x)
    yfin = img.shape[0]
    xfin = int((yfin - cvp) / mvp)

    # cv2.line(img,(sel_lines[tmax_ind,0].astype(int),sel_lines[tmax_ind,1].astype(int)),(sel_lines[tmax_ind,2].astype(int),sel_lines[tmax_ind,3].astype(int)),(255,0,0),int((sel_lines[tmax_ind,4].astype(int))/2))

    # cv2.line(img,(sel_lines[tmin_ind,0].astype(int),sel_lines[tmin_ind,1].astype(int)),(sel_lines[tmin_ind,2].astype(int),sel_lines[tmin_ind,3].astype(int)),(0,0,255),int((sel_lines[tmin_ind,4].astype(int))/2))

    cv2.line(img, (int(v_x), int(v_y)), (int(xfin), int(yfin)), (0, 255, 0), 4)

    cv2.circle(img, (int(v_x), int(v_y)), 10, (0, 255, 255), 3)

    # Drawing the desired line in black

    cv2.line(img, (int(img.shape[1] / 2), 0),
             (int(img.shape[1] / 2), img.shape[0]), (0, 0, 0), 3)

    cv2.namedWindow("image")
    cv2.setMouseCallback("image", setvp)

    # while True:
    #     dontwrite = 1
    #     cv2.imshow("image",img)
    #     cv2.waitKey(1000)
    #     cv2.destroyAllWindows()
    #     break
    # k = cv2.waitKey(0) & 0xFF

    # if k == 27:
    #     cv2.destroyAllWindows()

    # if k == ord('n'):
    #     break

    # if k == ord('s'):

    #     # output.write(zzz)
    #     sss = "/home/vdorbala/ICRA/Images/Test/{0}.png".format(file)
    #     cv2.imwrite(sss,img);
    #     break

    #After setting the new v_x, v_y. The same slope is considered.

    # cvp = v_y - mvp*(v_x)
    theta_m = thetam(sel_lines[tmax_ind], sel_lines[tmin_ind])

    # showagain = 0

    # if showagain == 1:

    #     showagain = 0

    #     yfin = img.shape[0]
    #     cv2.line(img,(int(v_x),int(v_y)),(int(xfin),int(yfin)),(0,255,0),int(w/2))

    #     cv2.circle(img,(int(v_x),int(v_y)), 10, (0,255,255), 3)

    #     #Drawing the desired line in black

    #     cv2.line(img,(int(img.shape[1]/2),0),(int(img.shape[1]/2),img.shape[0]),(0,0,0),3)

    #     cv2.imshow('image',img)
    #     k = cv2.waitKey(0) & 0xFF

    #     if k == 27:
    #         cv2.destroyAllWindows()
    #     # xfin = int((yfin -cvp)/mvp)
    #     den = (xfin-v_x)

    #     if den == 0:
    #         den = 1

    #     theta = np.arctan((yfin-v_y)/den)
    #     # if theta>90:
    #     if theta<0:
    #         theta = -math.pi/2 - theta
    #     if theta>0:
    #         theta = math.pi/2 - theta
    #     # innerangle = math.pi/2 - theta
    #     theta_m = theta
    #     mvp = np.tan(theta_m)
    #     # print ("Angle is {}".format(innerangle*180/math.pi))

    #     cvp = v_y - mvp*(v_x)

    # print ("TM is {}".format(theta_m*180/math.pi))

    ptd1 = [img.shape[1] / 2, 0]
    ptd2 = [img.shape[1] / 2, img.shape[0]]

    des_theta = 90 * math.pi / 180  # Because the desired line lies at the center of the image.

    des_line = [
        img.shape[1] / 2, 0, img.shape[1] / 2, img.shape[0], w / 2, des_theta,
        np.sqrt(np.square(ptd2[0] - ptd1[0]) + np.square(ptd2[1] - ptd1[1]))
    ]

    mvp_line = [
        v_x, v_y, xfin, yfin, w / 2, innerangle,
        np.sqrt(np.square(v_x - xfin) + np.square(v_y - yfin))
    ]

    mdl = np.tan(des_theta)

    ydes = ptd1[1]
    xdes = ptd1[0]

    cdes = ydes - mdl * xdes

    # Changing to Cartesian coordinates as given in the paper
    v_x = v_x - (img.shape[1] / 2)
    v_y = -(v_y - (img.shape[0] / 2))

    # print (v_x)

    # Converting pixels to meters; scale is 1 m = 5675 pixels
    v_x = v_x * (1 / 5675)
    v_y = v_y * (1 / 5675)
    # print 'theta_m is {0}'.format(theta_m*180/math.pi)

    sel_lines = sel_lines.astype(int)
    selind = tmax_ind

    # print("Thetam is {}".format(theta_m))

    h = 1.6  #0.47 for umich
    l = 0

    w = 0

    s = np.sin(theta_m)
    c = np.cos(theta_m)

    pm = v_x * c + v_y * s

    lambda_m = np.cos(theta_m) / h

    print("V_X is {}".format(v_x))

    error = [[v_x], [theta_m]]

    error = np.matrix(error)

    # print 'Error is {0}\n'.format(error)
    # print (v_x)

    # Just testing the ideal case
    iderr = ([0], [0])
    iderr = np.matrix(iderr)

    le = 100 * error

    # print ("le is {}".format(le))

    lemax = 0

    Jw = [[1 + np.square(v_x)],
          [((-1) * lambda_m * l * c) + (lambda_m * w * pm) + (pm * s)]]

    Jw = np.matrix(Jw)

    # print 'Jw is {0}'.format(Jw)

    Jv = [[0], [(-1) * (lambda_m * pm)]]

    Jv = np.matrix(Jv)

    # print '\nJv is {0} \n'.format(Jv)
    vconst = 0.2

    pinv = (-1) * (np.linalg.pinv(Jw))

    # print 'Pseudo inverse is {0} \n'.format(pinv)

    fmat = le + Jv * vconst

    # print 'fmat is {0},{1},{2} \n'.format(type(fmat),fmat.shape,fmat)

    w = pinv * (fmat)

    print('w is {0} \n'.format(w))

    w = float(w)

    return w, img