Example #1
    def get_corners(self, img):

        lines = lsd(img)

        corners = []
        if lines is not None:
            """separate out the horizontal and vertical lines, and draw them back onto separate canvases"""
            lines = lines.squeeze().astype(np.int32).tolist()
            horizontal_lines_canvas = np.zeros(img.shape, dtype=np.uint8)
            vertical_lines_canvas = np.zeros(img.shape, dtype=np.uint8)
            for line in lines:
                x1, y1, x2, y2, _ = line  # the fifth value is the segment width, unused here
                if abs(x2 - x1) > abs(y2 - y1):
                    (x1, y1), (x2, y2) = sorted(((x1, y1), (x2, y2)), key=lambda pt: pt[0])
                    cv2.line(horizontal_lines_canvas, (max(x1 - 5, 0), y1), (min(x2 + 5, img.shape[1] - 1), y2), 255, 2)
                else:
                    (x1, y1), (x2, y2) = sorted(((x1, y1), (x2, y2)), key=lambda pt: pt[1])
                    cv2.line(vertical_lines_canvas, (x1, max(y1 - 5, 0)), (x2, min(y2 + 5, img.shape[0] - 1)), 255, 2)

            lines = []

            """find the horizontal lines (connected-components -> bounding boxes -> final lines)"""
            contours, _ = cv2.findContours(horizontal_lines_canvas, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
            contours = sorted(contours, key=lambda c: cv2.arcLength(c, True), reverse=True)[:2]
            horizontal_lines_canvas = np.zeros(img.shape, dtype=np.uint8)
            for contour in contours:
                contour = contour.reshape((contour.shape[0], contour.shape[2]))
                min_x = np.amin(contour[:, 0], axis=0) + 2
                max_x = np.amax(contour[:, 0], axis=0) - 2
                left_y = int(np.average(contour[contour[:, 0] == min_x][:, 1]))
                right_y = int(np.average(contour[contour[:, 0] == max_x][:, 1]))
                lines.append((min_x, left_y, max_x, right_y))
                cv2.line(horizontal_lines_canvas, (min_x, left_y), (max_x, right_y), 1, 1)
                corners.append((min_x, left_y))
                corners.append((max_x, right_y))

            """find the vertical lines (connected-components -> bounding boxes -> final lines)"""
            contours, _ = cv2.findContours(vertical_lines_canvas, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
            contours = sorted(contours, key=lambda c: cv2.arcLength(c, True), reverse=True)[:2]
            vertical_lines_canvas = np.zeros(img.shape, dtype=np.uint8)
            for contour in contours:
                contour = contour.reshape((contour.shape[0], contour.shape[2]))
                min_y = np.amin(contour[:, 1], axis=0) + 2
                max_y = np.amax(contour[:, 1], axis=0) - 2
                top_x = int(np.average(contour[contour[:, 1] == min_y][:, 0]))
                bottom_x = int(np.average(contour[contour[:, 1] == max_y][:, 0]))
                lines.append((top_x, min_y, bottom_x, max_y))
                cv2.line(vertical_lines_canvas, (top_x, min_y), (bottom_x, max_y), 1, 1)
                corners.append((top_x, min_y))
                corners.append((bottom_x, max_y))

            """find the corners"""
            corners_y, corners_x = np.where(horizontal_lines_canvas + vertical_lines_canvas == 2)
            corners += zip(corners_x, corners_y)

        """remove corners in close proximity"""
        corners = self.filter_corners(corners)
        return corners
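The filter_corners helper called at the end of Example #1 is not shown. A minimal sketch of a proximity filter it might implement (hypothetical; the 20-pixel minimum spacing is an assumption, not taken from the source):

    def filter_corners(self, corners, min_dist=20):
        # hypothetical helper: keep a corner only if it is at least min_dist
        # pixels away from every corner already kept
        filtered = []
        for corner in corners:
            if all(np.linalg.norm(np.array(corner) - np.array(kept)) >= min_dist
                   for kept in filtered):
                filtered.append(corner)
        return filtered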
Example #2
def detect_lines(arg):
    Hline = []; Vline = []
    img = cv2.imread(arg, cv2.IMREAD_COLOR)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imgHeight, imgWidth = gray.shape
    lines = lsd(gray)

    for i in range(lines.shape[0]):
        pt1 = (int(lines[i, 0]), int(lines[i, 1]))
        pt2 = (int(lines[i, 2]), int(lines[i, 3]))
        width = lines[i, 4]
        # consider only lines whose length exceeds this arbitrary value
        if (abs(pt1[0] - pt2[0]) > 45) and ((int(pt1[1]) < imgHeight * 0.25) or (int(pt1[1]) > imgHeight * 0.75)):
            Hline.append([0, int(pt1[1]), imgWidth, int(pt2[1])])  # make full horizontal line
        if (abs(pt1[1] - pt2[1]) > 45) and ((int(pt1[0]) < imgWidth * 0.4) or (int(pt1[0]) > imgWidth * 0.6)):
            Vline.append([int(pt1[0]), 0, int(pt2[0]), imgHeight])  # make full vertical line
    Hline.sort(key=lambda x: x[1])
    Vline.sort(key=lambda x: x[0])
    return img, imgHeight, imgWidth, Hline, Vline
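A short usage sketch for Example #2 (not from the source; the filename is a placeholder), drawing the full-width and full-height lines it returns back onto the image:

import cv2

img, imgHeight, imgWidth, Hline, Vline = detect_lines('page.jpg')
for x1, y1, x2, y2 in Hline + Vline:
    cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
cv2.imwrite('page_lines.jpg', img)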
Example #3
def find_segments(im: Image,
                  num_segments=9,
                  sort_by_width=False) -> np.ndarray:
    """
    Find the largest line segments in an image and return an array of segment points.
    :param im: PIL Image object
    :param num_segments: Number of segments to return
    :param sort_by_width: If true, return segments with the largest width rather than distance
    :return: An array of line segment points [x1, y1, x2, y2, width]
    """
    # resize image to fit in video (1280x720) without cropping
    scale = min(1280 / im.width, 720 / im.height)
    im = im.resize((int(im.width * scale), int(im.height * scale)))
    x_offset = (1280 - im.width) // 2
    y_offset = (720 - im.height) // 2

    im_gray = np.array(im.convert('L'))

    segments = lsd(im_gray)

    # add offset to segment points since the image will be centered in the video
    segments[:, 0:3:2] += x_offset  # add x_offset to columns 0 and 2
    segments[:, 1:4:2] += y_offset  # add y_offset to columns 1 and 3

    # sort by distance or width of segments
    if sort_by_width:
        segments = segments[segments[:, 4].argsort()[::-1]]
    else:
        # add a column to store distance
        rows, cols = segments.shape
        segments_d = np.empty((rows, cols + 1))
        segments_d[:, :-1] = segments

        # find length of each line segment
        for i in range(segments_d.shape[0]):
            x1, y1, x2, y2, *_ = segments_d[i]
            segments_d[i, 5] = np.sqrt(
                (x2 - x1)**2 + (y2 - y1)**2)  # distance formula

        # sort and remove distance column
        segments = segments_d[segments_d[:, 5].argsort()[::-1]][:, :-1]

    return segments[:num_segments]
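A possible way to use Example #3 (a sketch, not from the source; 'house.png' is a placeholder): paste the resized image onto the 1280x720 canvas the function assumes, then draw the returned segments.

import numpy as np
from PIL import Image, ImageDraw

im = Image.open('house.png')
segments = find_segments(im, num_segments=9)

# reproduce the resize/centering done inside find_segments
scale = min(1280 / im.width, 720 / im.height)
canvas = Image.new('RGB', (1280, 720))
resized = im.resize((int(im.width * scale), int(im.height * scale)))
canvas.paste(resized, ((1280 - resized.width) // 2, (720 - resized.height) // 2))

draw = ImageDraw.Draw(canvas)
for x1, y1, x2, y2, width in segments:
    draw.line([(x1, y1), (x2, y2)], fill=(255, 0, 0), width=max(1, int(np.ceil(width))))
canvas.save('house_segments.jpg')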
Example #4
def lsdWrap(img):
    '''
    Opencv implementation of
    Rafael Grompone von Gioi, Jérémie Jakubowicz, Jean-Michel Morel, and Gregory Randall,
    LSD: a Line Segment Detector, Image Processing On Line, vol. 2012.
    [Rafael12] http://www.ipol.im/pub/art/2012/gjmr-lsd/?utm_source=doi
    @img
        input image
    '''
    if len(img.shape) == 3:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

    lines = lsd(img, quant=0.7)
    if lines is None:
        return np.zeros_like(img), np.array([])
    edgeMap = np.zeros_like(img)
    for i in range(lines.shape[0]):
        pt1 = (int(lines[i, 0]), int(lines[i, 1]))
        pt2 = (int(lines[i, 2]), int(lines[i, 3]))
        width = lines[i, 4]
        cv2.line(edgeMap, pt1, pt2, 255, int(np.ceil(width / 2)))
    edgeList = np.concatenate([lines, np.ones_like(lines[:, :2])], 1)
    return edgeMap, edgeList
Example #5
    def __detect_lines(self, img):
        """
        Detects lines using OpenCV LSD Detector
        """
        # Convert to grayscale if required
        if len(img.shape) == 3:
            img_copy = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        else:
            img_copy = img

        # Create LSD detector with default parameters
        #lsd = cv2.createLineSegmentDetector(0)

        # Detect lines in the image
        # Returns a NumPy array of type N x 1 x 4 of float32
        # such that the 4 numbers in the last dimension are (x1, y1, x2, y2)
        # These denote the start and end positions of a line
        # lines = lsd.detect(img_copy)[0]
        lines = lsd(img_copy)
        lines = lines[:, :4]
        print(lines.shape)
        # Remove singleton dimension
        #lines = lines[:, 0]

        # Filter out the lines whose length is lower than the threshold
        dx = lines[:, 2] - lines[:, 0]
        dy = lines[:, 3] - lines[:, 1]
        lengths = np.sqrt(dx * dx + dy * dy)
        mask = lengths >= self._length_thresh
        lines = lines[mask]

        # Store the lines internally
        self.__lines = lines

        # Return the lines
        return lines
Example #6
    for j in range(0, warped.shape[1]):
        if (warped[i][j] > 30):
            index.append(j)

    bWidth = index[-1] - index[0]
    print "width of src:" + str(bWidth)

    # Calculate Height
    margin = int(centroid[0] * 20.0 / 100)
    roi = srcWarped[:, int(centroid[0] - margin):int(centroid[0] + margin)].copy()
    roi = cv2.GaussianBlur(roi, (3, 3), 0)
    grayRoi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    lines = lsd(grayRoi)
    h = 0
    ind = 0
    for i in range(lines.shape[0]):
        [x1, y1, x2, y2, width] = lines[i]
        dist = np.sqrt((x1 - x2)**2 + (y1 - y2)**2)
        if dist > h:
            h = dist
            ind = i

    print "height of src:" + str(h)
    pt1 = (int(lines[ind, 0]), int(lines[ind, 1]))
    pt2 = (int(lines[ind, 2]), int(lines[ind, 3]))
    cv2.line(roi, pt1, pt2, (0, 0, 255), 1, cv2.LINE_AA)
    '''
    ## Reference measures
Example #7
    cv2.imwrite("data/skel.pgm",bw)
    cv2.imwrite("data/th.pgm",th2)
    ends = skeleton_points(bw)
    ## detection of ground, capacitor, v_source
    v_pairs,h_pairs = lines_between_ends(ends)
    v_boxes = box_between_ends(v_pairs)
    h_boxes = box_between_ends(h_pairs)
    boxes = v_boxes + h_boxes

    ## segmentation operations
    ## remove found symbols and connection lines
    for ((x, y, w, h), idx) in boxes:
        th[y:y+h, x:x+w] = 0

    ## detect vertical and horizontal lines, then remove them from the binary image
    lsd_lines = lsd(th)
    for line in lsd_lines:
        x1, y1, x2, y2, w = line
        angle = np.abs(np.rad2deg(np.arctan2(y1 - y2, x1 - x2)))
        if (angle < 105 and angle > 75) or angle > 160 or angle < 20:
            cv2.line(th, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 0), 6)

    kernel = np.ones((11,11),np.uint8)
    closing = cv2.morphologyEx(th, cv2.MORPH_CLOSE, kernel)

    rects = []
    # Find Blobs on image
    cnts = cv2.findContours(closing.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[0]
    for c in cnts:
        if cv2.contourArea(c)<80:
            continue
Example #8
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date    : 2015-12-19 02:09:53
# @Author  : Gefu Tang ([email protected])
# @Link    : https://github.com/primetang/pylsd
# @Version : 0.0.1

import cv2
import numpy as np
import os
from pylsd import lsd
fullName = 'car.jpg'
folder, imgName = os.path.split(fullName)
src = cv2.imread(fullName, cv2.IMREAD_COLOR)
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
lines = lsd(gray)
for i in range(lines.shape[0]):
    pt1 = (int(lines[i, 0]), int(lines[i, 1]))
    pt2 = (int(lines[i, 2]), int(lines[i, 3]))
    width = lines[i, 4]
    cv2.line(src, pt1, pt2, (0, 0, 255), int(np.ceil(width / 2)))
cv2.imwrite(os.path.join(folder, 'cv2_' + imgName.split('.')[0] + '.jpg'), src)
Example #9
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import numpy as np
import os
from PIL import Image, ImageDraw
from pylsd import lsd

full_name = 'house.png'
folder, img_name = os.path.split(full_name)
img = Image.open(full_name)
img_gray = np.asarray(img.convert('L'))

segments = lsd(img_gray, scale=0.5)

draw = ImageDraw.Draw(img)
for i in range(segments.shape[0]):
    pt1 = (int(segments[i, 0]), int(segments[i, 1]))
    pt2 = (int(segments[i, 2]), int(segments[i, 3]))
    width = segments[i, 4]
    draw.line((pt1, pt2), fill=(0, 0, 255), width=int(np.ceil(width / 2)))

img.save(os.path.join(folder, 'PIL_' + img_name.split('.')[0] + '.jpg'))
Example #10
#for i in range(segments.shape[0]):
#    pt1 = (int(segments[i, 0]), int(segments[i, 1]))
#    pt2 = (int(segments[i, 2]), int(segments[i, 3]))
#    width = segments[i, 4]
#    cv2.line(img, pt1, pt2, (0, 0, 255), int(np.ceil(width)))

#cv2.imwrite('output.jpg', img)

cap = cv2.VideoCapture('WIN_20210103_20_39_07_Pro.mp4')

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #, scale=0.8, sigma_scale=0.9, ang_th=22.5, quant=2.0, eps=150, density_th=0.7, n_bins=1024, max_grad=255.0
    segments = lsd(gray)

    for i in range(segments.shape[0]):
        pt1 = (int(segments[i, 0]), int(segments[i, 1]))
        pt2 = (int(segments[i, 2]), int(segments[i, 3]))
        width = segments[i, 4]
        cv2.line(frame, pt1, pt2, (0, 0, 255), int(np.ceil(width)))

    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
Example #11
def detect_lines(image):
    lsd.lsd()

    try:
        v_pairs,h_pairs = lines_between_ends(ends)
        v_boxes = box_between_ends(v_pairs)
        h_boxes = box_between_ends(h_pairs)
        boxes = v_boxes + h_boxes
    except:
        boxes=[]
    ## segmentation operations
    ## remove founded symbols and connection lines
    for ((x,y,w,h),idx) in boxes:
        th[y:y+h,x:x+w] = 0

    ## detect vert and hori lines then remove them from binary image
    lsd_lines = lsd(th)
    for line in lsd_lines:
        x1,y1,x2,y2,w = line
        angle = np.abs(np.rad2deg(np.arctan2(y1 - y2, x1 - x2)))
        if (angle<105 and angle>75) or angle>160 or angle<20:
            cv2.line(th,(int(x1),int(y1)),(int(x2),int(y2)),(0,0,0),6)

    kernel = np.ones((11,11),np.uint8)
    closing = cv2.morphologyEx(th, cv2.MORPH_CLOSE, kernel)

    rects = []
    # Find Blobs on image  gives regions where componenets could be present
    cnts = cv2.findContours(closing.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[1]
    for c in cnts:
        if cv2.contourArea(c)<80:    #threshold don't change unless required
        #if cv2.contourArea(c)<600:  #for larger images if components have large area 
Example #13
def fitPlanesPiecewise(image, depth, normal, info, numOutputPlanes=20, imageIndex=1, parameters={}):
    if 'meanshift' in parameters and parameters['meanshift'] > 0:
        import sklearn.cluster
        meanshift = sklearn.cluster.MeanShift(parameters['meanshift'])
        pass
    
    from pylsd import lsd
    
    height = depth.shape[0]
    width = depth.shape[1]

    camera = getCameraFromInfo(info)
    urange = (np.arange(width, dtype=np.float32) / (width) * (camera['width']) - camera['cx']) / camera['fx']
    urange = urange.reshape(1, -1).repeat(height, 0)
    vrange = (np.arange(height, dtype=np.float32) / (height) * (camera['height']) - camera['cy']) / camera['fy']
    vrange = vrange.reshape(-1, 1).repeat(width, 1)
    
    X = depth * urange
    Y = depth
    Z = -depth * vrange


    normals = normal.reshape((-1, 3))
    normals = normals / np.maximum(np.linalg.norm(normals, axis=-1, keepdims=True), 1e-4)
    validMask = np.logical_and(np.linalg.norm(normals, axis=-1) > 1e-4, depth.reshape(-1) > 1e-4)
    
    points = np.stack([X, Y, Z], axis=2).reshape(-1, 3)
    valid_points = points[validMask]
    
    lines = lsd(image.mean(2))

    lineImage = image.copy()
    for line in lines:
        cv2.line(lineImage, (int(line[0]), int(line[1])), (int(line[2]), int(line[3])), (0, 0, 255), int(np.ceil(line[4] / 2)))
        continue
    cv2.imwrite('test/lines.png', lineImage)

    numVPs = 3
    VPs, VPLines, remainingLines = calcVanishingPoints(lines, numVPs=numVPs)

    lineImage = image.copy()    
    for VPIndex, lines in enumerate(VPLines):
        for line in lines:
            cv2.line(lineImage, (int(line[0]), int(line[1])), (int(line[2]), int(line[3])), ((VPIndex == 0) * 255, (VPIndex == 1) * 255, (VPIndex == 2) * 255), int(np.ceil(line[4] / 2)))
            continue
        continue
    cv2.imwrite('test/lines_vp.png', lineImage)    

    dominantNormals = np.stack([(VPs[:, 0] * info[16] / width - info[2]) / info[0], np.ones(numVPs), -(VPs[:, 1] * info[17] / height - info[6]) / info[5]], axis=1)
    dominantNormals /= np.maximum(np.linalg.norm(dominantNormals, axis=1, keepdims=True), 1e-4)

    dotThreshold = np.cos(np.deg2rad(20))
    for normalIndex, crossNormals in enumerate([[1, 2], [2, 0], [0, 1]]):
        normal = np.cross(dominantNormals[crossNormals[0]], dominantNormals[crossNormals[1]])
        normal = normalize(normal)
        if np.dot(normal, dominantNormals[normalIndex]) < dotThreshold:
            dominantNormals = np.concatenate([dominantNormals, np.expand_dims(normal, 0)], axis=0)
            pass
        continue

    print(VPs)
    print(dominantNormals)
    
    dominantNormalImage = np.abs(np.matmul(normal, dominantNormals.transpose()))
    cv2.imwrite('test/dominant_normal.png', drawMaskImage(dominantNormalImage))
    
    planeHypothesisAreaThreshold = width * height * 0.01
    
    planes = []
    vpPlaneIndices = []
    if 'offsetGap' in parameters:
        offsetGap = parameters['offsetGap']
    else:
        offsetGap = 0.1
        pass
    planeIndexOffset = 0

    for dominantNormal in dominantNormals:
        if np.linalg.norm(dominantNormal) < 1e-4:
            continue
        offsets = np.tensordot(valid_points, dominantNormal, axes=([1], [0]))

        if 'meanshift' in parameters and parameters['meanshift'] > 0:
            sampleInds = np.arange(offsets.shape[0])
            np.random.shuffle(sampleInds)
            meanshift.fit(np.expand_dims(offsets[sampleInds[:int(offsets.shape[0] * 0.02)]], -1))
            for offset in meanshift.cluster_centers_:
                planes.append(dominantNormal * offset)
                continue
        else:
            offset = offsets.min()
            maxOffset = offsets.max()
            while offset < maxOffset:
                planeMask = np.logical_and(offsets >= offset, offsets < offset + offsetGap)
                segmentOffsets = offsets[np.logical_and(offsets >= offset, offsets < offset + offsetGap)]
                if segmentOffsets.shape[0] < planeHypothesisAreaThreshold:
                    offset += offsetGap
                    continue
                planeD = segmentOffsets.mean()
                planes.append(dominantNormal * planeD)
                offset = planeD + offsetGap

                continue
            pass
        

        vpPlaneIndices.append(np.arange(planeIndexOffset, len(planes)))
        planeIndexOffset = len(planes)
        continue

    if len(planes) == 0:
        return np.array([]), np.zeros((height, width), dtype=np.int32)
    planes = np.array(planes)

    
    
    planesD = np.linalg.norm(planes, axis=1, keepdims=True)
    planeNormals = planes / np.maximum(planesD, 1e-4)

    if 'distanceCostThreshold' in parameters:
        distanceCostThreshold = parameters['distanceCostThreshold']
    else:
        distanceCostThreshold = 0.05
        pass


    distanceCost = np.abs(np.tensordot(points, planeNormals, axes=([1, 1])) - np.reshape(planesD, [1, -1])) / distanceCostThreshold

    normalCostThreshold = 1 - np.cos(np.deg2rad(30))        
    normalCost = (1 - np.abs(np.tensordot(normals, planeNormals, axes=([1, 1])))) / normalCostThreshold

    if 'normalWeight' in parameters:
        normalWeight = parameters['normalWeight']
    else:
        normalWeight = 1
        pass
    
    unaryCost = distanceCost + normalCost * normalWeight
    unaryCost *= np.expand_dims(validMask.astype(np.float32), -1)    
    unaries = unaryCost.reshape((width * height, -1))
    
    
    print('number of planes ', planes.shape[0])
    cv2.imwrite('test/distance_cost.png', drawSegmentationImage(-distanceCost.reshape((height, width, -1)), unaryCost.shape[-1] - 1))

    cv2.imwrite('test/normal_cost.png', drawSegmentationImage(-normalCost.reshape((height, width, -1)), unaryCost.shape[-1] - 1))

    cv2.imwrite('test/unary_cost.png', drawSegmentationImage(-unaryCost.reshape((height, width, -1)), blackIndex=unaryCost.shape[-1] - 1))

    cv2.imwrite('test/segmentation.png', drawSegmentationImage(-unaries.reshape((height, width, -1)), blackIndex=unaries.shape[-1]))
    

    if 'numProposals' in parameters:
        numProposals = parameters['numProposals']
    else:
        numProposals = 3
        pass

    numProposals = min(numProposals, unaries.shape[-1] - 1)
    
    proposals = np.argpartition(unaries, numProposals)[:, :numProposals]
    unaries = -readProposalInfo(unaries, proposals).reshape((-1, numProposals))
    
    nodes = np.arange(height * width).reshape((height, width))

    deltas = [(0, 1), (1, 0)]
    
    edges = []
    edges_features = []
            
                
    for delta in deltas:
        deltaX = delta[0]
        deltaY = delta[1]
        partial_nodes = nodes[max(-deltaY, 0):min(height - deltaY, height), max(-deltaX, 0):min(width - deltaX, width)].reshape(-1)
        edges.append(np.stack([partial_nodes, partial_nodes + (deltaY * width + deltaX)], axis=1))

        labelDiff = (np.expand_dims(proposals[partial_nodes], -1) != np.expand_dims(proposals[partial_nodes + (deltaY * width + deltaX)], 1)).astype(np.float32)

        
        edges_features.append(labelDiff)
        continue

    edges = np.concatenate(edges, axis=0)
    edges_features = np.concatenate(edges_features, axis=0)


    if 'edgeWeights' in parameters:
        edgeWeights = parameters['edgeWeights']
    else:
        edgeWeights = [0.5, 0.6, 0.6]
        pass    
    
    lineSets = np.zeros((height * width, 3))
    creaseLines = np.expand_dims(np.stack([planeNormals[:, 0] / info[0], planeNormals[:, 1], -planeNormals[:, 2] / info[5]], axis=1), 1) * planesD.reshape((1, -1, 1))
    creaseLines = creaseLines - np.transpose(creaseLines, [1, 0, 2])    
    for planeIndex_1 in range(planes.shape[0]):
        for planeIndex_2 in range(planeIndex_1 + 1, planes.shape[0]):
            creaseLine = creaseLines[planeIndex_1, planeIndex_2]
            if abs(creaseLine[0]) > abs(creaseLine[2]):
                vs = np.arange(height)
                us = -(creaseLine[1] + (vs - info[6]) * creaseLine[2]) / creaseLine[0] + info[2]
                minUs = np.floor(us).astype(np.int32)
                maxUs = minUs + 1
                validIndicesMask = np.logical_and(minUs >= 0, maxUs < width)
                if validIndicesMask.sum() == 0:
                    continue
                vs = vs[validIndicesMask]
                minUs = minUs[validIndicesMask]
                maxUs = maxUs[validIndicesMask]
                edgeIndices = (height - 1) * width + (vs * (width - 1) + minUs)
                for index, edgeIndex in enumerate(edgeIndices):
                    pixel_1 = vs[index] * width + minUs[index]
                    pixel_2 = vs[index] * width + maxUs[index]
                    proposals_1 = proposals[pixel_1]
                    proposals_2 = proposals[pixel_2]                    
                    if planeIndex_1 in proposals_1 and planeIndex_2 in proposals_2:
                        proposalIndex_1 = np.where(proposals_1 == planeIndex_1)[0][0]
                        proposalIndex_2 = np.where(proposals_2 == planeIndex_2)[0][0]
                        edges_features[edgeIndex, proposalIndex_1, proposalIndex_2] *= edgeWeights[0]
                        pass
                    if planeIndex_2 in proposals_1 and planeIndex_1 in proposals_2:
                        proposalIndex_1 = np.where(proposals_1 == planeIndex_2)[0][0]
                        proposalIndex_2 = np.where(proposals_2 == planeIndex_1)[0][0]
                        edges_features[edgeIndex, proposalIndex_1, proposalIndex_2] *= edgeWeights[0]
                        pass
                    continue

                lineSets[vs * width + minUs, 0] = 1
                lineSets[vs * width + maxUs, 0] = 1
            else:
                us = np.arange(width)
                vs = -(creaseLine[1] + (us - info[2]) * creaseLine[0]) / creaseLine[2] + info[6]
                minVs = np.floor(vs).astype(np.int32)
                maxVs = minVs + 1
                validIndicesMask = np.logical_and(minVs >= 0, maxVs < height)
                if validIndicesMask.sum() == 0:
                    continue                
                us = us[validIndicesMask]
                minVs = minVs[validIndicesMask]
                maxVs = maxVs[validIndicesMask]                
                edgeIndices = (minVs * width + us)
                for index, edgeIndex in enumerate(edgeIndices):
                    pixel_1 = minVs[index] * width + us[index]
                    pixel_2 = maxVs[index] * width + us[index]
                    proposals_1 = proposals[pixel_1]
                    proposals_2 = proposals[pixel_2]                    
                    if planeIndex_1 in proposals_1 and planeIndex_2 in proposals_2:
                        proposalIndex_1 = np.where(proposals_1 == planeIndex_1)[0][0]
                        proposalIndex_2 = np.where(proposals_2 == planeIndex_2)[0][0]
                        edges_features[edgeIndex, proposalIndex_1, proposalIndex_2] *= edgeWeights[0]
                        pass
                    if planeIndex_2 in proposals_1 and planeIndex_1 in proposals_2:
                        proposalIndex_1 = np.where(proposals_1 == planeIndex_2)[0][0]
                        proposalIndex_2 = np.where(proposals_2 == planeIndex_1)[0][0]
                        edges_features[edgeIndex, proposalIndex_1, proposalIndex_2] *= edgeWeights[0]
                        pass
                    continue
                lineSets[minVs * width + us, 0] = 1
                lineSets[maxVs * width + us, 0] = 1                
                pass
            continue
        continue

    planeDepths = calcPlaneDepths(planes, width, height, np.array([info[0], info[5], info[2], info[6], info[16], info[17], 0, 0, 0, 0])).reshape((height * width, -1))
    planeDepths = readProposalInfo(planeDepths, proposals).reshape((-1, numProposals))

    planeHorizontalVPMask = np.ones((planes.shape[0], 3), dtype=bool)
    for VPIndex, planeIndices in enumerate(vpPlaneIndices):
        planeHorizontalVPMask[planeIndices] = False
        continue

    
    for VPIndex, lines in enumerate(VPLines):
        lp = lines[:, :2]
        ln = lines[:, 2:4] - lines[:, :2]
        ln /= np.maximum(np.linalg.norm(ln, axis=-1, keepdims=True), 1e-4)
        ln = np.stack([ln[:, 1], -ln[:, 0]], axis=1)
        lnp = (ln * lp).sum(1, keepdims=True)
        occlusionLines = np.concatenate([ln, lnp], axis=1)
        for occlusionLine in occlusionLines:
            if abs(occlusionLine[0]) > abs(occlusionLine[1]):
                vs = np.arange(height)
                us = (occlusionLine[2] - vs * occlusionLine[1]) / occlusionLine[0]
                minUs = np.floor(us).astype(np.int32)
                maxUs = minUs + 1
                validIndicesMask = np.logical_and(minUs >= 0, maxUs < width)
                vs = vs[validIndicesMask]
                minUs = minUs[validIndicesMask]
                maxUs = maxUs[validIndicesMask]                
                edgeIndices = (height - 1) * width + (vs * (width - 1) + minUs)
                for index, edgeIndex in enumerate(edgeIndices):
                    pixel_1 = vs[index] * width + minUs[index]
                    pixel_2 = vs[index] * width + maxUs[index]
                    proposals_1 = proposals[pixel_1]
                    proposals_2 = proposals[pixel_2]                    
                    for proposalIndex_1, planeIndex_1 in enumerate(proposals_1):
                        if not planeHorizontalVPMask[planeIndex_1][VPIndex]:
                            continue
                        planeDepth_1 = planeDepths[pixel_1][proposalIndex_1]
                        for proposalIndex_2, planeIndex_2 in enumerate(proposals_2):
                            if planeDepths[pixel_2][proposalIndex_2] > planeDepth_1:
                                edges_features[edgeIndex, proposalIndex_1, proposalIndex_2] *= edgeWeights[1]
                                pass
                            continue
                        continue
                    continue
                lineSets[vs * width + minUs, 1] = 1
                lineSets[vs * width + maxUs, 1] = 1
            else:
                us = np.arange(width)
                vs = (occlusionLine[2] - us * occlusionLine[0]) / occlusionLine[1]
                
                minVs = np.floor(vs).astype(np.int32)
                maxVs = minVs + 1
                validIndicesMask = np.logical_and(minVs >= 0, maxVs < height)
                us = us[validIndicesMask]
                minVs = minVs[validIndicesMask]
                maxVs = maxVs[validIndicesMask]                
                edgeIndices = (minVs * width + us)
                for index, edgeIndex in enumerate(edgeIndices):
                    pixel_1 = minVs[index] * width + us[index]
                    pixel_2 = maxVs[index] * width + us[index]
                    proposals_1 = proposals[pixel_1]
                    proposals_2 = proposals[pixel_2]                    
                    for proposalIndex_1, planeIndex_1 in enumerate(proposals_1):
                        if not planeHorizontalVPMask[planeIndex_1][VPIndex]:
                            continue
                        planeDepth_1 = planeDepths[pixel_1][proposalIndex_1]
                        for proposalIndex_2, planeIndex_2 in enumerate(proposals_2):
                            if planeDepths[pixel_2][proposalIndex_2] > planeDepth_1:
                                edges_features[edgeIndex, proposalIndex_1, proposalIndex_2] *= edgeWeights[1]
                                pass
                            continue
                        continue
                    continue
                lineSets[minVs * width + us, 1] = 1
                lineSets[maxVs * width + us, 1] = 1                
                pass
            continue
        continue

    for line in remainingLines:
        if abs(line[3] - line[1]) > abs(line[2] - line[0]):
            if line[3] < line[1]:
                line = np.array([line[2], line[3], line[0], line[1]])
                pass
            vs = np.arange(line[1], line[3] + 1, dtype=np.int32)
            us = line[0] + (vs - line[1]) / (line[3] - line[1]) * (line[2] - line[0])
            minUs = np.floor(us).astype(np.int32)
            maxUs = minUs + 1
            validIndicesMask = np.logical_and(minUs >= 0, maxUs < width)
            vs = vs[validIndicesMask]
            minUs = minUs[validIndicesMask]
            maxUs = maxUs[validIndicesMask]                
            edgeIndices = (height - 1) * width + (vs * (width - 1) + minUs)
            for edgeIndex in edgeIndices:
                edges_features[edgeIndex] *= edgeWeights[2]
                continue
            lineSets[(vs * width + minUs), 2] = 1
            lineSets[(vs * width + maxUs), 2] = 1            
        else:
            if line[2] < line[0]:
                line = np.array([line[2], line[3], line[0], line[1]])
                pass
            us = np.arange(line[0], line[2] + 1, dtype=np.int32)
            vs = line[1] + (us - line[0]) / (line[2] - line[0]) * (line[3] - line[1])
            
            minVs = np.floor(vs).astype(np.int32)
            maxVs = minVs + 1
            validIndicesMask = np.logical_and(minVs >= 0, maxVs < height)
            us = us[validIndicesMask]
            minVs = minVs[validIndicesMask]
            maxVs = maxVs[validIndicesMask]
            edgeIndices = (minVs * width + us)
            for edgeIndex in edgeIndices:
                edges_features[edgeIndex] *= edgeWeights[2]
                continue
            lineSets[minVs * width + us, 2] = 1
            lineSets[maxVs * width + us, 2] = 1
            continue
        continue
    cv2.imwrite('test/line_sets.png', drawMaskImage(lineSets.reshape((height, width, 3))))
    

    if 'smoothnessWeight' in parameters:
        smoothnessWeight = parameters['smoothnessWeight']
    else:
        smoothnessWeight = 4
        pass

    print('start')
    refined_segmentation = inference_ogm(unaries, -edges_features * smoothnessWeight, edges, return_energy=False, alg='trw')
    print('done')
    refined_segmentation = refined_segmentation.reshape([height, width, 1])    
    refined_segmentation = readProposalInfo(proposals, refined_segmentation)
    planeSegmentation = refined_segmentation.reshape([height, width])

    planeSegmentation[np.logical_not(validMask.reshape((height, width)))] = planes.shape[0]    
    cv2.imwrite('test/segmentation_refined.png', drawSegmentationImage(planeSegmentation))
    
    return planes, planeSegmentation
Example #14
def auto_detect_vanishing_points(src):
    global Vanishing_point
    gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)

    lines = lsd(gray)  # Line Segment Detector

    # Remove noisy lines, i.e. lines shorter than 25 pixels or thinner than the width threshold
    itr = 0
    while itr < lines.shape[0]:
        length = math.sqrt(
            pow(int(lines[itr, 0]) - int(lines[itr, 2]), 2) +
            pow(int(lines[itr, 1]) - int(lines[itr, 3]), 2))
        if length < Line_Threshold or lines[itr, 4] < Line_Threshold_Width:
            # do not advance the index after a delete: the next row shifts into this slot
            lines = np.delete(lines, itr, axis=0)
        else:
            itr += 1

    if DEBUG:
        print(lines)

    line_equ = np.zeros((lines.shape[0], 3))
    sin_val = np.zeros((lines.shape[0], 1))

    if DEBUG:
        print(lines.shape[0])

    # Find line equations in homogeneous coordinates and calculate its slope & sine of the angle
    for i in range(lines.shape[0]):
        pt1 = [int(lines[i, 0]), int(lines[i, 1]), 1]
        pt2 = [int(lines[i, 2]), int(lines[i, 3]), 1]
        line_equ[i] = np.cross(pt1, pt2)

        if int(lines[i, 1]) - int(lines[i, 3]) != 0:
            slope = float(
                (int(lines[i, 0]) - int(lines[i, 2]))) / (int(lines[i, 1]) -
                                                          int(lines[i, 3]))
            sin_val[i] = math.sin(math.atan(slope))
        else:  # when slope is infinity
            sin_val[i] = 1

    # Clustering of slopes into 3 categories using Kmeans
    kmeans = KMeans(n_clusters=3, random_state=0).fit(sin_val)

    # Forming line clusters and storing them into new Matrix variable of M[3][lines]
    Matrix = [[0 for x in range(1)] for y in range(3)]
    src1 = src.copy()
    for i in range(lines.shape[0]):
        pt1 = (int(lines[i, 0]), int(lines[i, 1]))
        pt2 = (int(lines[i, 2]), int(lines[i, 3]))

        width = lines[i, 4]
        colour = (255, 0, 0)
        if kmeans.labels_[i] % 3 == 0:
            colour = (255, 0, 0)
            Matrix[0].append(line_equ[i])
        elif kmeans.labels_[i] % 3 == 1:
            colour = (0, 255, 0)
            Matrix[1].append(line_equ[i])
        elif kmeans.labels_[i] % 3 == 2:
            colour = (0, 0, 255)
            Matrix[2].append(line_equ[i])

        cv2.line(src1, pt1, pt2, colour, int(np.ceil(width / 2)))
    if DEBUG:
        cv2.imshow('lines', src1)
        cv2.waitKey(0)
        # cv2.destroyAllWindows()

    # image with 3 clusters - each line assigned to one of them
    cv2.imwrite('Clustered_Lines_Image.jpg', src1)

    # remove the placeholder 0 that each cluster list was initialised with
    del Matrix[0][0]
    del Matrix[1][0]
    del Matrix[2][0]

    global max_inliers

    for index in range(0, 3):  # iterating along 3 line clusters
        max_inliers = 0
        vp = [0, 0, 0]
        for i in range(0, N_iterations):  # Start Ransac and Repeat N times
            a = random.sample(
                Matrix[index],
                2)  # Selecting 2 lines at random from the lines cluster
            point_vanish = intersection_point(
                a[0], a[1]
            )  # Determine the possible vanishing point as the intersection
            #  of selected random lines

            # ignore intersections at infinity, which the distance calculation cannot handle
            if point_vanish[2] != 0:
                inliers_count = line_point_distance(Matrix[index],
                                                    point_vanish, lines,
                                                    line_equ, src)
            else:
                inliers_count = 0

            if inliers_count > max_inliers:
                max_inliers = inliers_count
                best_lines = a
                vp = point_vanish
                #print max_inliers

        Vanishing_point.append(vp)

        for j in range(2):
            for x in range(line_equ.shape[0]):
                if best_lines[j][0] == line_equ[x][0] and best_lines[j][1] == line_equ[x][1] and best_lines[j][2] == \
                        line_equ[x][2]:
                    pt1 = (int(lines[x, 0]), int(lines[x, 1]))
                    pt2 = (int(lines[x, 2]), int(lines[x, 3]))
                    width = lines[x, 4]
                    color = [0, 0, 0]
                    color[j] = 255
                    color = tuple(color)
                    cv2.line(src, pt1, pt2, color, int(np.ceil(width / 2)))

        if DEBUG:
            cv2.imshow('lines', src)
            cv2.waitKey(0)

    print(Vanishing_point)