Example #1
def region_of_interest(image):
    height = image.shape[0]
    polygons = np.array([[(200, height), (1100, height), (550, 250)]])  # hard-coded for ~1280x720 road frames
    mask = np.zeros_like(image)
    cv2.fillPoly(mask, polygons, 255)
    masked_image = cv2.bitwise_and(image, mask)
    return masked_image
	def get(self):
		while True:
			ret, frame = self.cap.read()
			if not ret:
				break
			else:
				fgmask = self.fgbg.apply(frame)
				fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, self.kernel)
				fgmask = cv2.dilate(fgmask, self.kernel, iterations = 2)
				a, contours, hierarchy = cv2.findContours(fgmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)  # OpenCV 3.x returns (image, contours, hierarchy)
				mask = np.zeros(frame.shape, dtype=np.uint8)
				channel_count = frame.shape[2]  # i.e. 3 or 4 depending on your image
				ignore_mask_color = (255,)*channel_count
				cv2.fillPoly(mask, contours, ignore_mask_color)
				# apply the mask
				masked_image = cv2.bitwise_and(frame, mask)
				if contours:  # findContours returns a list; check truthiness rather than != 0
					returnContours = []
					for cnt in contours:
						if cv2.contourArea(cnt) > self.minContourArea:
							x,y,w,h = cv2.boundingRect(cnt)
							masked_image = masked_image[y:y+h, x:x+w]
							returnContours.append((masked_image, (x,y,w,h), frame))
					if returnContours:
						yield returnContours
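A minimal usage sketch for the region_of_interest helper above (the file name is hypothetical; the hard-coded triangle assumes roughly 1280x720 road frames, and the scalar 255 fill expects a single-channel input):

import cv2
import numpy as np

frame = cv2.imread('road.jpg', cv2.IMREAD_GRAYSCALE)  # hypothetical test image
roi = region_of_interest(frame)
cv2.imwrite('road_roi.jpg', roi)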
Example #3
def project_lane_lines(img,left_fitx,right_fitx,yvals):
    
    # Create an image to draw the lines on
    color_warp = np.zeros_like(img).astype(np.uint8)

    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_fitx, yvals]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, yvals])))])
    pts = np.hstack((pts_left, pts_right))

    # Draw the lane onto the warped blank image
    cv2.polylines(color_warp, np.int_([pts]), isClosed=False, color=(255,0,0), thickness=20)
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

    undist = undistort(img)
    #sp = (550, 310) 
    #ep = (700, 460)
    #for i in range(4):
        #center = ((ep[0] + sp[0])/2 , )
        #cv2.rectangle(undist, (550, 310), (700, 460), (0,0,255), 4)
    unwarp, Minv = warp(img, bird_view=False)

    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (img.shape[1], img.shape[0])) 
    # Combine the result with the original image
    result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)
    return result
Example #4
def mask(img):
  biggest = None
  max_area = 0
  grey = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
  #blk = cv2.bitwise_not(grey)
  kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
  res = cv2.morphologyEx(grey, cv2.MORPH_OPEN, kernel)  # note: unused below; the threshold runs on `grey`
  ret, thresh = cv2.threshold(grey, 127, 255, 0)
  contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
  dest = np.zeros(thresh.shape, np.uint8)
  print(contours[::1])
  print(len(contours))
  print(hierarchy)
  for cnt in contours[::1]:
    rect = cv2.minAreaRect(cnt)
    points = cv2.boxPoints(rect)  # cv2.cv.BoxPoints in OpenCV 2.x
    points = np.int0(np.around(points))
    #cv2.drawContours(dest, [cnt],0,(0,255,0),2)
    #cv2.polylines(dest, [points], True,( 255,255,255), 2 )
    cv2.fillPoly(orig, [cnt], (100, 20, 90), 4)   # `orig` is assumed to be a module-level colour image
    cv2.fillPoly(dest, [cnt], (255, 255, 255), 4)

    x = cv2.cvtColor(dest, cv2.COLOR_GRAY2RGB)
    cv2.imshow('contour-highlighted image.jpg', x)
    cv2.imwrite("../../images/bound.jpg", x)

    cv2.imshow('masked image', orig)
Example #5
def region_of_interest(img):
    """
    Applies an image mask.
    
    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    """    
    shape = img.shape
    vertices = np.array([[(0,0),(shape[1],0),(shape[1],0),(6*shape[1]/7,shape[0]),
                      (shape[1]/7,shape[0]), (0,0)]],dtype=np.int32)

    mask = np.zeros_like(img)   
    
    #defining a 3 channel or 1 channel color to fill the mask with depending on the input image
    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255
        
    #filling pixels inside the polygon defined by "vertices" with the fill color    
    cv2.fillPoly(mask, vertices, ignore_mask_color)
    
    #returning the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
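Because this version picks the mask color from the channel count, it handles grayscale and BGR inputs alike; a quick sketch with synthetic arrays:

import cv2
import numpy as np

gray = np.full((240, 320), 200, dtype=np.uint8)    # 1 channel -> fill color 255
bgr = np.full((240, 320, 3), 200, dtype=np.uint8)  # 3 channels -> fill color (255, 255, 255)
for img in (gray, bgr):
    print(img.shape, region_of_interest(img).max())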
Example #6
def draw_polygons(img, polygons, thickness=1, color=(0, 255, 0)):
    color = convert_color(color)
    if thickness == -1:
        fillPoly(img, polygons, color)
    else:
        polylines(img, array(polygons, dtype='int32'), 1, color,
                  thickness=thickness)
def reptfulle(tabc,dx,dy):
    imgi = np.zeros((dx,dy,3), np.uint8)
    cv2.polylines(imgi,[tabc],True,(1,1,1)) 
    cv2.fillPoly(imgi,[tabc],(1,1,1))
    tabzi = np.array(imgi)
    tabz = tabzi[:, :,1]   
    return tabz, imgi
def crop_waffle(img):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    greyscale = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    lower_yellow = np.array([0,50,50])
    upper_yellow = np.array([70,255,255])
    mask = cv2.inRange(hsv, np.uint8(lower_yellow), np.uint8(upper_yellow))
    kernel = np.ones((9,9),np.uint8)
    closed_mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    masked_img = cv2.bitwise_and(greyscale,greyscale,mask = closed_mask)
    contours, hierarchy = cv2.findContours(masked_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # two-value unpack matches OpenCV 2.x/4.x
    #now find the largest contour
    max_area = 0
    max_contour = None
    for c in contours:
        # cv2.contourArea accepts numpy contours directly; the legacy
        # cv.fromarray / cv.ContourArea round-trip is no longer needed
        if cv2.contourArea(c) > max_area:
            max_contour = c
            max_area = cv2.contourArea(c)
    shape = img.shape
    largest_blob_mask = np.zeros((shape[0],shape[1],1),np.uint8)
    cv2.fillPoly(largest_blob_mask, pts =[max_contour], color=(255,255,255))
    print_rgb_hist(img,largest_blob_mask)
    return cv2.bitwise_and(img,img, mask= largest_blob_mask)
Example #9
def get_external_contour(points, resolution=None):
    """ takes a list of `points` defining a linear ring, which can be 
    self-intersecting, and returns an approximation to the external contour """
    if resolution is None:
        # determine resolution from minimal distance of consecutive points
        dist_min = np.inf
        for p1, p2 in zip(np.roll(points, 1, axis=0), points):  # itertools.izip in the original Python 2 code
            dist = curves.point_distance(p1, p2)
            if dist > 0:
                dist_min = min(dist_min, dist)
        resolution = 0.5*dist_min
        
        # limit the resolution such that there are at most 2048 points
        dim_max = np.max(np.ptp(points, axis=0)) #< longest dimension
        resolution = max(resolution, dim_max/2048)

    # build a linear ring with integer coordinates
    ps_int = np.array(np.asarray(points)/resolution, np.int32)  # np.int is removed from modern NumPy; cv2 wants int32
    ring = geometry.LinearRing(ps_int)

    # get the image of the linear ring by plotting it into a mask
    x_min, y_min, x_max, y_max = ring.bounds
    shape = (int(y_max - y_min) + 3, int(x_max - x_min) + 3)  # bounds are floats; np.zeros needs ints
    x_off, y_off = int(x_min - 1), int(y_min - 1)
    mask = np.zeros(shape, np.uint8)
    cv2.fillPoly(mask, [ps_int], 255, offset=(-x_off, -y_off))

    # find the contour of this mask to recover the exterior contour
    # index [1] follows the OpenCV 3.x convention (image, contours, hierarchy);
    # OpenCV 2.x/4.x return (contours, hierarchy), i.e. index [0]
    contours = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE,
                                offset=(x_off, y_off))[1]
    return np.array(np.squeeze(contours))*resolution
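The offset argument passed to cv2.fillPoly above shifts the polygon before rasterizing, so the mask only needs to cover the ring's bounding box rather than the full coordinate range. A self-contained sketch of the same trick with plain integer points (no shapely needed):

import cv2
import numpy as np

pts = np.array([[1000, 1000], [1040, 1000], [1020, 1030]], np.int32)
x_min, y_min = pts.min(axis=0)
mask = np.zeros((40, 50), np.uint8)  # local mask: height 40, width 50
cv2.fillPoly(mask, [pts], 255, offset=(-x_min + 2, -y_min + 2))
print(int(mask.sum()) // 255)  # filled pixel count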
    def visualise_measurements(self):

        if not self.current_data_visualised:
            self.current_data_visualised = True
            cnt = lambda x: np.rint(np.array(x)*self.fusion_model_params.cvscale).astype(int)

            img = np.zeros((self.particle_filter_model_param.world_dimensions[1]*self.fusion_model_params.cvscale,
                            self.particle_filter_model_param.world_dimensions[0]*self.fusion_model_params.cvscale,3)).astype('uint8')
            img +=255


            im_copies = []
            #Draw Usable Area
            if self.particle_filter_model_param.world_usable_area != [[]]:
                cv2.polylines(img,[cnt(self.particle_filter_model_param.world_usable_area.exterior.coords[:])],
                              isClosed=True, color=(0,0,0), thickness=2)
            #Add sensors
            for k, sensor_prop in self.sensor_properties.items():
                img_copy = copy.deepcopy(img)
                cv2.fillPoly(img_copy,
                             [cnt(self.sensor_properties[k].measurement_boundary_poly.exterior.coords[:])],
                             (128,128,128))
                im_copies.append(img_copy)

            max_w = max(np.max([p.w for p in self.pir_model.particles]), 0)  # builtin max: clamp at 0 (np.min(x, 0) would read 0 as an axis)
            img_copy = copy.deepcopy(img)

            for particle in self.pir_model.particles:

                col = 255.*np.log((particle.w+1)/(max_w+1))
                cv2.circle(img_copy,
                           (int(particle.location.x*self.fusion_model_params.cvscale),
                            int(particle.location.y*self.fusion_model_params.cvscale)),5,(0,col,0),-1)

            im_copies.append(img_copy)

            for i,overlay in enumerate(im_copies):
                opacity = 0.4
                cv2.addWeighted(overlay, opacity, img, 1-opacity, 0, img)

            for sensor in self.sensor_properties.values():
                cv2.circle(img,(int(sensor.location.x*self.fusion_model_params.cvscale),
                                int(sensor.location.y*self.fusion_model_params.cvscale)),10,(0,0,255),-1)



            if (self.m_x != -1 and self.m_y != -1) and not (np.isinf(self.m_x) or np.isinf(self.m_y)):
                if self.good:
                    cv2.circle(img,(int(self.m_x*self.fusion_model_params.cvscale),
                                    int(self.m_y*self.fusion_model_params.cvscale)),
                               min(12,int(self.fusion_model_params.cvscale/8.)),(255,255,0),1)
                else:
                    cv2.circle(img,(int(self.m_x*self.fusion_model_params.cvscale),
                                    int(self.m_y*self.fusion_model_params.cvscale)),
                               min(12,int(self.fusion_model_params.cvscale/8.)),(64,64,0),1)

            cv2.imshow('test',cv2.flip(img,0))
            cv2.waitKey(1)
        else:
            logging.debug('Data unchanged visualisation not updated.')
Example #11
def drawFloorCrop(event,x,y,flags,params):
    global perspectiveMatrix,name,RENEW_TETRAGON
    imgCroppingPolygon = np.zeros_like(params['imgFloorCorners'])
    if event == cv2.EVENT_RBUTTONUP:
        cv2.destroyWindow('Floor Corners for ' + name)
    if len(params['croppingPolygons'][name]) > 4 and event == cv2.EVENT_LBUTTONUP:
        RENEW_TETRAGON = True
        h=params['imgFloorCorners'].shape[0]
        params['croppingPolygons'][name] = np.delete(params['croppingPolygons'][name], -1, 0)   # delete 5th extra vertex of the floor cropping tetragon
        params['croppingPolygons'][name] = params['croppingPolygons'][name] - [h, 0]
        
        # Sort cropping tetragon vertices counter-clockwise starting with top left
        params['croppingPolygons'][name] = counterclockwiseSort(params['croppingPolygons'][name])
        # Get the matrix of perspective transformation
        params['croppingPolygons'][name] = np.reshape(params['croppingPolygons'][name], (4,2))
        tetragonVertices = np.float32(params['croppingPolygons'][name])
        cv2.destroyWindow('Floor Corners for ' + name)
        tetragonVerticesUpd = np.float32([[0,0],[0,h],[h,h],[h,0]])
        perspectiveMatrix[name] = cv2.getPerspectiveTransform(tetragonVertices, tetragonVerticesUpd)
    if event == cv2.EVENT_LBUTTONDOWN:
        if len(params['croppingPolygons'][name]) == 4 and RENEW_TETRAGON:
            params['croppingPolygons'][name] = np.array([[0,0]])
            RENEW_TETRAGON = False
        if len(params['croppingPolygons'][name]) == 1:
            params['croppingPolygons'][name][0] = [x, y]
        params['croppingPolygons'][name] = np.append(params['croppingPolygons'][name], [[x, y]], axis=0)
    if event == cv2.EVENT_MOUSEMOVE and not (len(params['croppingPolygons'][name]) == 4 and RENEW_TETRAGON):
        params['croppingPolygons'][name][-1] = [x, y]
        if len(params['croppingPolygons'][name]) > 1:
            cv2.fillPoly(imgCroppingPolygon, [np.reshape(params['croppingPolygons'][name], (len(params['croppingPolygons'][name]),2))], BGR_COLOR['green'], cv2.LINE_AA)
            imgCroppingPolygon = cv2.addWeighted(params['imgFloorCorners'], 1.0, imgCroppingPolygon, 0.5, 0.)
            cv2.imshow('Floor Corners for ' + name, imgCroppingPolygon)
Example #12
def draw_boxes(image, boxes, labels, obj_thresh, quiet=True):
    for box in boxes:
        label_str = ''
        label = -1
        
        for i in range(len(labels)):
            if box.classes[i] > obj_thresh:
                if label_str != '': label_str += ', '
                label_str += (labels[i] + ' ' + str(round(box.get_score()*100, 2)) + '%')
                label = i
            if not quiet: print(label_str)
                
        if label >= 0:
            text_size = cv2.getTextSize(label_str, cv2.FONT_HERSHEY_SIMPLEX, 1.1e-3 * image.shape[0], 5)
            width, height = text_size[0][0], text_size[0][1]
            region = np.array([[box.xmin-3,        box.ymin], 
                               [box.xmin-3,        box.ymin-height-26], 
                               [box.xmin+width+13, box.ymin-height-26], 
                               [box.xmin+width+13, box.ymin]], dtype='int32')  

            cv2.rectangle(img=image, pt1=(box.xmin,box.ymin), pt2=(box.xmax,box.ymax), color=get_color(label), thickness=5)
            cv2.fillPoly(img=image, pts=[region], color=get_color(label))
            cv2.putText(img=image, 
                        text=label_str, 
                        org=(box.xmin+13, box.ymin - 13), 
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX, 
                        fontScale=1e-3 * image.shape[0], 
                        color=(0,0,0), 
                        thickness=2)
        
    return image          
    def transform_map(self):
        self.triangulate()

        slam_map = cv2.imread(self.img_2, 0)
        rows, cols = cv2.imread(self.img_1, 0).shape
        output = np.zeros((rows, cols), np.uint8)
        slam_nodes = self.nodes("register/slam.1.node")
        semantic_nodes = self.nodes("register/semantic.1.node")
        # From the ele file, color triangles
        first_line = True
        with open("register/slam.1.ele", 'r') as f:
            for line in f:
                if first_line:
                    # Do nothing
                    first_line = False 
                elif '#' not in line:
                    # This line is not a comment
                    s = line.split()
                    node_index_1 = int(s[1])
                    node_index_2 = int(s[2])
                    node_index_3 = int(s[3])
                    slam_pts = [slam_nodes[node_index_1], slam_nodes[node_index_2], slam_nodes[node_index_3]]
                    semantic_pts = [semantic_nodes[node_index_1], semantic_nodes[node_index_2], semantic_nodes[node_index_3]]
                    transform = cv2.getAffineTransform(np.array(slam_pts, dtype='float32'), np.array(semantic_pts, dtype='float32'))
                    if transform is not None:  # "!= None" on an array is elementwise and would raise
                        all_transformed = cv2.warpAffine(slam_map, transform, (cols, rows))
                        area = np.array(semantic_pts, dtype='int32')
                        area = area.reshape((-1, 1, 2))
                        mask = np.zeros((rows, cols), np.uint8)
                        cv2.fillPoly(mask, [area], 255)
                        tmp = cv2.bitwise_and(all_transformed, mask)
                        output = cv2.add(tmp, output)
            cv2.imshow('Output', output)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
    def color_triangles(self, image):
        if image == "semantic":
            node_file = "register/semantic.1.node"
            ele_file = "register/semantic.1.ele"
            rows, cols = cv2.imread(self.img_1, 0).shape
        elif image == "slam":
            node_file = "register/slam.1.node"
            ele_file = "register/slam.1.ele"
            rows, cols = cv2.imread(self.img_2, 0).shape
        else:
            return

        tri_img = np.zeros((rows, cols,3), np.uint8)
        # Seed nodes with None since triangles are 1-indexed
        nodes = self.nodes(node_file)
        # From the ele file, color triangles
        first_line = True
        with open(ele_file, 'r') as f:
            for line in f:
                if first_line:
                    # Do nothing
                    first_line = False 
                elif '#' not in line:
                    # This line is not a comment
                    s = line.split()
                    v1 = nodes[int(s[1])]
                    v2 = nodes[int(s[2])]
                    v3 = nodes[int(s[3])]
                    pts = np.array([v1, v2, v3], np.int32)
                    pts = pts.reshape((-1,1,2))
                    color = self.robot.triangle_to_color(int(s[0]))
                    cv2.fillPoly(tri_img,[pts],color)
        img_name = image + ".png"
        cv2.imwrite(img_name, tri_img)
        call(["mv", img_name, "register/" + img_name])
Example #15
def addpatch(col,lab, xt,yt,px,py,dimtabx,dimtaby):
    imgi = np.zeros((dimtabx,dimtaby,3), np.uint8)
    tablint=[(xt,yt),(xt,yt+py),(xt+px,yt+py),(xt+px,yt)]
    tabtxt=np.asarray(tablint)
    cv2.polylines(imgi,[tabtxt],True,col)
    cv2.fillPoly(imgi,[tabtxt],col)
    return imgi
Example #16
def load_image_and_contour(contour_path, radius=None):
    image_path = get_image_by_contour(contour_path)
    f = dicom.read_file(image_path)
    window_min = int(f.WindowCenter) - 0.5 * int(f.WindowWidth)
    image = 255 * np.clip(
        (f.pixel_array.astype(float) - window_min) / float(f.WindowWidth),
        0.0, 1.0)
    contour = np.zeros_like(image, dtype='uint8')
    ctrs = np.loadtxt(contour_path, delimiter=' ').astype(np.int32)
    cv2.fillPoly(contour, [ctrs], 1)

    def center(args):
        img, con = args
        ctr_x, ctr_y = initial_roi.get_real_center(con)
        ctr_x = int(ctr_x + radius * random.random() - radius // 2)  # slice indices must be ints
        ctr_y = int(ctr_y + radius * random.random() - radius // 2)
        def crop_ctr(ii):
            shape_x, shape_y = ii.shape
            return np.pad(ii, radius, mode='edge') \
                [ctr_x:ctr_x + 2 * radius, ctr_y:ctr_y + 2 * radius]
        return [crop_ctr(ii) for ii in (img, con)]

    return [center(random_transformation(image, contour))
            for _ in range(10)]  # xrange/map in the original Python 2 code
Example #17
def project(warped, img, ploty, left_fitx, right_fitx, M, avg_curverad, offset, avg_left_fitx, avg_right_fitx):
    # Create an image to draw the lines on
    warp_zero = np.zeros_like(warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))

    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    Minv = np.linalg.inv(M)
    newwarp = cv2.warpPerspective(color_warp, Minv, (img.shape[1], img.shape[0]))

    # Combine the result with the original image
    result = cv2.addWeighted(img, 1, newwarp, 0.3, 0)

    font = cv2.FONT_HERSHEY_SIMPLEX
    text = "Radius of Curvature: {} m".format(int(avg_curverad))
    cv2.putText(result, text, (0, 50), font, 1, (255, 255, 255), 3)

    if (offset < 0):
        text = "Vehicle is {:.2f} m left of center".format(-offset)
    else:
        text = "Vehicle is {:.2f} m right of center".format(offset)

    cv2.putText(result, text, (0, 100), font, 1, (255, 255, 255), 3)
    # plt.imshow(result)
    # plt.show()
    return result
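The vstack/transpose/flipud recast used in project is the standard idiom for turning two fitted curves into one closed polygon: the right edge is reversed so the vertex order traces the boundary continuously. A minimal sketch with synthetic vertical lines:

import cv2
import numpy as np

ploty = np.linspace(0, 99, 100)
left_fitx = np.full(100, 20.0)
right_fitx = np.full(100, 80.0)

pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))  # shape (1, 200, 2): down the left edge, back up the right

canvas = np.zeros((100, 100, 3), np.uint8)
cv2.fillPoly(canvas, pts.astype(np.int32), (0, 255, 0))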
def CropRoI(img):
    y_shape, x_shape, _ = img.shape

    X = np.zeros(4)
    Y = np.zeros(4)

    #top left
    X[0], Y[0] = x_shape // 4, y_shape // 3
    #bottom left
    X[1], Y[1] = 0, y_shape
    #bottom right
    X[2], Y[2] = x_shape, Y[1]
    #top right
    X[3], Y[3] = 4 * x_shape // 5, Y[0]
    
    # mask defaulting to black for 3-channel and transparent for 4-channel
    # (of course replace corners with yours)
    mask = np.zeros(img.shape[:2], dtype=np.uint8)
    roi_corners = np.array([list(zip(X, Y))], dtype=np.int32)  # list() needed: zip is lazy in Python 3
    # fill the ROI so it doesn't get wiped out when the mask is applied
    channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
    ignore_mask_color = (255,) * channel_count
    cv2.fillPoly(mask, roi_corners, ignore_mask_color)

    # apply the mask
    masked_image = cv2.bitwise_and(img, img, mask=mask)

    # save the result
    #cv2.imwrite('image_masked.png', masked_image)

    return masked_image
Example #19
def fit2BiggestContour(image):
    contour = getBiggestContour(image)
    x,y, w, h = cv2.boundingRect(contour)
    mask = np.zeros( (h,w), dtype=np.uint8 )
    contour = [ [[p[0][0]-x,p[0][1]-y]] for p in contour ]
    cv2.fillPoly( mask, np.array([contour]), 255 )
    return mask, (x,y)
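The trick here is shifting the contour into the bounding rect's local frame so the mask is only (h, w) instead of image-sized. The per-point list comprehension can also be done with a broadcast subtraction; a sketch with a toy OpenCV-style contour of shape (N, 1, 2):

import cv2
import numpy as np

contour = np.array([[[12, 15]], [[40, 15]], [[26, 44]]], np.int32)  # toy contour
x, y, w, h = cv2.boundingRect(contour)
shifted = (contour - (x, y)).astype(np.int32)  # broadcast over every point
mask = np.zeros((h, w), np.uint8)
cv2.fillPoly(mask, [shifted], 255)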
Example #20
    def getRoi(self, img, roi_type='polygon'):
        imshape = img.shape
        # ploygon_vertices = np.array([[(110,imshape[0]),(410, 310),(480, 310), (imshape[1],imshape[0])]], dtype=np.int32)
        triangle_vertices = np.array([[(0, imshape[0]),(imshape[1]/2, imshape[0]/2),(imshape[1],imshape[0])]], dtype=np.int32)
        polygon_vertices = np.array([[(0, imshape[0]),(imshape[1]/2-imshape[1]/8, imshape[0]/2+imshape[0]/8),(imshape[1]/2+imshape[1]/8, imshape[0]/2+imshape[0]/8),(imshape[1],imshape[0])]], dtype=np.int32)
        # noisy
        # polygon_vertices = np.array([[(0, imshape[0]),(0, 3*imshape[0]/4),(imshape[1]/4,
        #    imshape[0]/2),(3*imshape[1]/4,imshape[0]/2),(imshape[1],3*imshape[0]/4),(imshape[1], imshape[0])]], dtype=np.int32)
        mask = np.zeros_like(img)

        # Use a mask color of the same number of channels as the image
        if len(imshape) > 2:
            channel_count = imshape[2]
            ignore_mask_color = (255,)* channel_count
        else:
            ignore_mask_color = 255

        if roi_type == 'triangle':
            cv2.fillPoly(mask, triangle_vertices, ignore_mask_color)
        elif roi_type == 'polygon':
            cv2.fillPoly(mask, polygon_vertices, ignore_mask_color)

        #if self.LOG_LEVEL >1:
        #    showImage('roi_type_mask', mask)
        masked_image = cv2.bitwise_and(img, mask)
        if self.LOG_LEVEL >0:
            showImage('masked_image', masked_image)
        return masked_image
Example #21
def reptfulle(tabc,dx,dy,col):
    imgi = np.zeros((dx,dy,3), np.uint8)
    cv2.polylines(imgi,[tabc],True,col)
    cv2.fillPoly(imgi,[tabc],col)
#    tabzi = np.array(imgi)
#    tabz = tabzi[:, :,1]
    return imgi
Example #22
    def _get_bird_mask(self):
        bird_homo_corners = np.dot(self._perspective_matrix, self._perspec_corners)
        # Initialize the output corners
        bird_corners = [[1, 1, 1, 1],
                        [1, 1, 1, 1]]
        # Populate the above matrix
        for col in range(0,4):
            for row in range(0,2):
                bird_corners[row][col] = bird_homo_corners[row][col] / bird_homo_corners[2][col]
        c = 10
        cushion = [[c, -c, -c, c],
                   [c, c, -c, -c]
                  ]
        bird_corners = np.add(bird_corners, cushion)
        self.trapezoid = bird_corners
        self.trapezoid = np.vstack([self.trapezoid, [1,1, 1, 1]])

        mask_corners = np.array([ [bird_corners[0][0], bird_corners[1][0]],
                                  [bird_corners[0][1], bird_corners[1][1]],
                                  [bird_corners[0][2], bird_corners[1][2]],
                                  [bird_corners[0][3], bird_corners[1][3]]], np.int32)

        # Fill with 255 instead of 1, or else it doesn't detect all of the features properly
        cv2.fillPoly(self.bird_mask, [mask_corners], 255)
        return
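The "fill with 255" comment matters because downstream code often treats the mask as an 8-bit image: with cv2.bitwise_and a fill of 1 keeps only the lowest bit, while most mask-taking APIs merely require nonzero values. A tiny sketch of the difference:

import cv2
import numpy as np

img = np.full((8, 8), 200, np.uint8)
square = np.array([[0, 0], [7, 0], [7, 7], [0, 7]], np.int32)
for fill in (1, 255):
    mask = np.zeros_like(img)
    cv2.fillPoly(mask, [square], fill)
    print(fill, cv2.bitwise_and(img, mask).max())  # 1 -> 0, 255 -> 200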
Example #23
def showBWImage(current_frame,i):
    #cnt = (389,263),(40,34),0
    #box = cv2.cv.BoxPoints(cnt)
    #box = np.int0(box)
    if i==1: 
        cap = cap1
        label = labelArr1[current_frame]
    else: 
        cap = cap2
        label = labelArr2[current_frame]
    #current_frame = cv2.getTrackbarPos("Silder1", "Video")
    cap.set(1,current_frame)
    ret,image = cap.read()
    if label[1]!="None":
        if label[1] == "Right" or label[1] == "Left" or label[1] == "Intersect":
            cnt = (float(label[2]),float(label[3])),(float(label[4]),float(label[5])),float(label[6])
            box = cv2.boxPoints(cnt)  # cv2.cv.BoxPoints in OpenCV 2.x
            box = np.int0(box)
            mask = np.zeros((480,640,3), np.uint8)            
            cv2.fillPoly(mask, np.int32([box]), (255,255,255))            
            cv2.threshold(image,int(label[7]),255,cv2.THRESH_BINARY,image)            
            masked_image = cv2.bitwise_and(image, mask)
            cv2.drawContours(masked_image ,[box],0,(0,0,255),2)
            masked_image = cv2.resize(masked_image,(320,240))
            masked_image = cv2.copyMakeBorder(masked_image,2,2,2,2,cv2.BORDER_CONSTANT,value=(0,0,255))
            #merged_frame[:244, 344:668]= masked_image
    else:
        masked_image = np.zeros((240,320,3), np.uint8)
        masked_image = cv2.copyMakeBorder(masked_image,2,2,2,2,cv2.BORDER_CONSTANT,value=(0,0,255))
    if i==1: merged_frame[:244, 344:668]= masked_image 
    else: merged_frame[254:498, 344:668]= masked_image 
Example #24
def warpImage(original, feats, tri, img_path):
	image = cv2.imread(img_path)
	white = (255, 255, 255)
	rows,cols,ch = image.shape
	masked_image = np.zeros(image.shape, dtype=np.uint8)
	for t in tri:
		old_a = original[t[0]]
		old_b = original[t[1]]
		old_c = original[t[2]]
		new_a = feats[t[0]]
		new_b = feats[t[1]]
		new_c = feats[t[2]]
		pts1 = np.float32([old_a,old_b,old_c])
		pts2 = np.float32([new_a,new_b,new_c])
		M = cv2.getAffineTransform(pts1,pts2)
		dst = cv2.warpAffine(image,M,(cols,rows))
		# cv2.imshow('masked image', dst)
		mask = np.zeros(image.shape, dtype=np.uint8)
		roi_corners = np.array([[new_a, new_b, new_c]], dtype=np.int32)
		cv2.fillPoly(mask, roi_corners, white)
		masked = cv2.bitwise_and(dst, mask)
		masked_image = cv2.bitwise_or(masked_image, masked)
	# cv2.imshow('masked image', masked_image)
	# cv2.waitKey()
	# cv2.destroyAllWindows()
	return masked_image
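warpImage is a piecewise-affine morph: each triangle is warped with its own affine transform, isolated with a fillPoly mask, and OR-ed into the result. A toy invocation under assumed inputs (the image path is hypothetical; original/feats are matching point lists and tri indexes triangles into them):

original = [(0, 0), (200, 0), (100, 150)]
feats = [(10, 10), (190, 5), (100, 160)]
tri = [(0, 1, 2)]  # one triangle over the three point pairs
morphed = warpImage(original, feats, tri, 'face.jpg')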
Example #25
def processing_image(dev, color_stream):
	throttle = 0
	steering_angle = 0.0
	frame_color = color_stream.read_frame()
	frame_color_data = frame_color.get_buffer_as_uint8()
	frame = np.frombuffer(frame_color_data, dtype=np.uint8)
	frame.shape = (480, 640, 3)
	frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
	frame = cv2.flip(frame,1)
	img_gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

	# Set threshold and maxValue
	thresh = 220
	maxValue = 255

	# Basic threshold example
	th, img_thres = cv2.threshold(img_gray, thresh, maxValue, cv2.THRESH_BINARY)

	# `polygon`, `kernel` and the tuning values used below are assumed to be
	# module-level globals in the original project
	ignore_mask_color = (255,) * frame.shape[2]
	mask = np.zeros_like(img_thres)
	cv2.fillPoly(mask, [polygon], ignore_mask_color)

	masked_image = cv2.bitwise_and(img_thres, mask)

	img_eroded = cv2.erode(masked_image, kernel, iterations=1)
	
	center_x_old, straight_count, straight_flag, junction_flag, junction_frame =center_path(gray, img_eroded,min_y,max_y, direction, center_x_old, tolerance, sum_required, straight_count, straight_flag, count, junction_flag, junction_frame)
	
	#Calculate steering angle
	kp = 0.8
	steering_angle = kp*(320-center_x_old)
	#cv2.imshow("Live feed", frame)
	
	return throttle, steering_angle
Example #26
    def outputROIMask(self, aImage, aROIPointsList):
        pointsArray = np.array(aROIPointsList)
        pointsArray = pointsArray.reshape((-1, 1, 2))
        mask = np.zeros(aImage.shape, dtype=np.uint8)
        white = (255, 255, 255)
        cv2.fillPoly(mask, np.int32([pointsArray]), white)
        return mask
Example #27
def get_next_window(img,center_point,width):
    """
    input: img,center_point,width
        img: binary 3 channel image
        center_point: center of window
        width: width of window
    
    output: masked, center
        masked : a masked image of the same size; the mask is a window centered at center_point
        center : the x position of the strongest column of pixels found within the window
    """
    
    ny,nx,_ = img.shape
    mask  = np.zeros_like(img)
    if (center_point <= width/2): center_point = width/2
    if (center_point >= nx-width/2): center_point = nx-width/2
    
    left  = center_point - width/2
    right = center_point + width/2
    
    vertices = np.array([[(left,0),(left,ny), (right,ny),(right,0)]], dtype=np.int32)
    ignore_mask_color=(255,255,255)
    cv2.fillPoly(mask, vertices, ignore_mask_color)
    masked = cv2.bitwise_and(mask,img)

    hist = np.sum(masked[:,:,0],axis=0)
    if max(hist) > 10000:  # at least one column has enough lit pixels
        center = np.argmax(hist)
    else:
        center = center_point
        
    return masked,center
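A hedged usage sketch for get_next_window, with a synthetic 3-channel binary image containing one bright vertical line (the 10000 threshold inside assumes 255-valued pixels summed over a tall image):

import cv2
import numpy as np

img = np.zeros((720, 1280, 3), np.uint8)
img[:, 640:650, :] = 255  # fake lane line near x = 645
masked, center = get_next_window(img, center_point=600, width=200)
print(center)  # argmax of the column sums: 640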
Example #28
    def maskImgWithROI(self, aImage, aROIPointsList):
        pointsArray = np.array(aROIPointsList)
        mask = np.zeros(aImage.shape, dtype=np.uint8)
        white = (255, 255, 255)
        cv2.fillPoly(mask, np.int32([pointsArray]), white)
        maskedImage = cv2.bitwise_and(aImage, mask)
        return maskedImage
Example #29
def GenSet(type, n_of_case, triangle_size, height, width):
	triangle = n_of_case * triangle_size
	url = "./images/" + type
	if not os.path.exists(url):
		os.makedirs(url)
	for i in range(n_of_case):
		img = np.zeros((height, width), np.uint8)
		img[:, :] = 255
		if (i < triangle):
			n = 3
			kind = 1 # triangle
		else:
			n = np.random.randint(4, 11)  # randint's upper bound is exclusive (random_integers is gone from NumPy)
			kind = 0 # other
		pts = []
		for j in range(n):
			x = np.random.randint(0, height + 1)
			y = np.random.randint(0, width + 1)
			pts.append((x, y))

		cv2.fillPoly(img, [np.array(pts, dtype=np.int32)], 0)  # one polygon as an int32 point array
		#Display the image
		#cv2.imshow("img", img)
		#cv2.waitKey(0)
		
		# 1 for triangle, 0 for other
		cv2.imwrite(url + "/%i_%i.jpeg" % (i, kind), img)
Example #30
    def setMarker(self, imageSize=(1600,1200), bgColor=255, fgColor=0):
        """Setup the marker.  By default the marker will be 1600x1200, with a white background and a black
        foreground.
        """


        imageSizeScale = np.array(imageSize, dtype=float)  # np.float is removed from modern NumPy

        # We have to reverse the X and Y sizes for numpy's benefit.
        self.array = np.ones((imageSize[1], imageSize[0]), dtype=np.uint8) * bgColor

        self.shapes = dict()

        # Turn the fractional coordinates of the triangles in self.rawShapes['triangles'] into
        # actual coordinates now that we know the size of the image.
        self.shapes['triangles'] = [
            (triangle * imageSizeScale).astype('int32')
            for triangle in self.rawShapes['triangles']
        ]

        # Turn the fractional coordinates of the points in self.rawShapes['border'] into
        # actual coordinates now that we know the size of the image.
        self.shapes['border'] = [
            (border * imageSizeScale).astype('int32')
            for border in self.rawShapes['border']
        ]

        # Actually draw the shapes in the image.
        cv2.fillPoly(self.array, self.shapes['triangles'], fgColor)
        cv2.fillPoly(self.array, self.shapes['border'], fgColor)
def local_line_search(wraped_binarized, left_fit, right_fit, ym_per_pix=30 / 720, xm_per_pix=3.7 / 700,
                      margin=100, with_debug_image=True):
    DRAWN_CURVE_LINE_WIDTH = 4  # width of final curve in pixels

    nonzero = wraped_binarized.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])

    left_lane_indx = ((nonzerox > (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy + left_fit[2] - margin)) &
                      (nonzerox < (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy + left_fit[2] + margin)))

    right_lane_indx = ((nonzerox > (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy + right_fit[2] - margin)) &
                       (nonzerox < (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy + right_fit[2] + margin)))

    # Again, extract left and right line pixel positions
    left_x = nonzerox[left_lane_indx]
    left_y = nonzeroy[left_lane_indx]
    right_x = nonzerox[right_lane_indx]
    right_y = nonzeroy[right_lane_indx]

    if len(left_y) == 0 or len(right_y) == 0 or len(left_x) != len(left_y) or len(right_y) != len(right_x):
        return None, None

    # Fit a second order polynomial to each
    left_fit = np.polyfit(left_y, left_x, 2)
    right_fit = np.polyfit(right_y, right_x, 2)

    # Generate x and y values for plotting
    ploty = (np.linspace(0, wraped_binarized.shape[0] - 1, wraped_binarized.shape[0])).astype(int)
    left_fitx = (left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]).astype(int)
    right_fitx = (right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]).astype(int)

    lane_position = get_lane_position(wraped_binarized, left_fitx, right_fitx, xm_per_pix)
    lane_width = get_lane_width(left_fitx, right_fitx, xm_per_pix)

    result = np.dstack((wraped_binarized, wraped_binarized, wraped_binarized)) * 255

    if with_debug_image:
        logger.info('lane_position %s m' % lane_position)
        logger.info('lane_width %s m' % lane_width)

        # Create an image to draw on and an image to show the selection window
        out_img = np.dstack((wraped_binarized, wraped_binarized, wraped_binarized)) * 255
        window_img = np.zeros_like(out_img)
        # Color in left and right line pixels
        out_img[nonzeroy[left_lane_indx], nonzerox[left_lane_indx]] = [255, 0, 0]
        out_img[nonzeroy[right_lane_indx], nonzerox[right_lane_indx]] = [0, 0, 255]

        # Generate a polygon to illustrate the search window area
        # And recast the x and y points into usable format for cv2.fillPoly()
        left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - margin, ploty]))])
        left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + margin,
                                                                        ploty])))])
        left_line_pts = np.hstack((left_line_window1, left_line_window2))
        right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - margin, ploty]))])
        right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + margin,
                                                                         ploty])))])
        right_line_pts = np.hstack((right_line_window1, right_line_window2))

        # Draw the lane onto the warped blank image
        cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
        cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
        result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)

        draw_fit_curves_on_image(result, left_fitx, right_fitx, ploty, DRAWN_CURVE_LINE_WIDTH)

    # curvature in meters
    left_curverad_m, right_curverad_m = curvatures_in_meters(
        left_x, left_y, ploty, right_x, right_y, xm_per_pix, ym_per_pix)

    if with_debug_image:
        logger.info("Curvature left: %s m, right: %s m" % (left_curverad_m, right_curverad_m))

    lane_info = LaneInfo()
    lane_info.left_fit = left_fit
    lane_info.right_fit = right_fit
    lane_info.left_fitx = left_fitx
    lane_info.right_fitx = right_fitx
    lane_info.ploty = ploty
    lane_info.left_curverad_m = left_curverad_m
    lane_info.right_curverad_m = right_curverad_m
    lane_info.lane_position = lane_position
    lane_info.lane_width = lane_width
    lane_info.min_left_y = 0
    lane_info.max_left_y = wraped_binarized.shape[0]
    lane_info.min_right_y = 0
    lane_info.max_right_y = wraped_binarized.shape[0]

    return result, lane_info
Example #32
def gen_one_voc_train_dir():
    # rootdir = '/users/maqiao/mq/Data_checked/multiClass/multiClass0320'
    # root_path = "/users/maqiao/mq/Data_checked/multiClass/pucheng20191101"
    # rootdirs = [
    # '/users/maqiao/mq/Data_checked/multiClass/multiClass0320',
    # '/users/maqiao/mq/Data_checked/multiClass/multiClass0507',
    # '/users/maqiao/mq/Data_checked/multiClass/multiClass0606',
    # '/users/maqiao/mq/Data_checked/multiClass/multiClass0704',
    # '/users/maqiao/mq/Data_checked/multiClass/multiClass190808',
    # '/users/maqiao/mq/Data_checked/multiClass/multiClass190814',
    # '/users/maqiao/mq/Data_checked/multiClass/multiClass190822-1',
    # '/users/maqiao/mq/Data_checked/multiClass/multiClass190822-3',
    # '/users/maqiao/mq/Data_checked/multiClass/multiClass190823',
    # '/users/maqiao/mq/Data_checked/multiClass/multiClass190826',
    # '/users/maqiao/mq/Data_checked/multiClass/multiClass190827',
    # '/users/maqiao/mq/Data_checked/multiClass/multiClass190827_1',
    # '/users/maqiao/mq/Data_checked/multiClass/multiClass190830',
    # '/users/maqiao/mq/Data_checked/multiClass/multiClass190830_1',
    # '/users/maqiao/mq/Data_checked/multiClass/multiClass190830_2',
    # '/users/maqiao/mq/Data_checked/multiClass/multiClass190830_3'
    # "/users/maqiao/mq/Data_checked/multiClass/mark/houhaicui",
    # "/users/maqiao/mq/Data_checked/multiClass/mark/limingqing",
    # "/users/maqiao/mq/Data_checked/multiClass/mark/mayanzhuo",
    # "/users/maqiao/mq/Data_checked/multiClass/mark/quanqingfang",
    # "/users/maqiao/mq/Data_checked/multiClass/mark/shenjinyan",
    # "/users/maqiao/mq/Data_checked/multiClass/mark/wanglinan",
    # "/users/maqiao/mq/Data_checked/multiClass/mark/yangyanping",
    # "/users/maqiao/mq/Data_checked/multiClass/duomubiao/houhaicui",
    # "/users/maqiao/mq/Data_checked/multiClass/duomubiao/limingqing",
    # "/users/maqiao/mq/Data_checked/multiClass/duomubiao/mayanzhuo",
    # "/users/maqiao/mq/Data_checked/multiClass/duomubiao/quanqingfang",
    # "/users/maqiao/mq/Data_checked/multiClass/duomubiao/shenjinyan",
    # "/users/maqiao/mq/Data_checked/multiClass/duomubiao/wanglinan",
    # "/users/maqiao/mq/Data_checked/multiClass/duomubiao/yangyanping",
    # "/users/maqiao/mq/Data_checked/multiClass/tricycle_bigCar20190912",
    # "/users/maqiao/mq/Data_checked/multiClass/tricycle_bigCar20190920",
    # "/users/maqiao/mq/Data_checked/multiClass/tricycle_bigCar20190925",
    # "/users/maqiao/mq/Data_checked/multiClass/tricycle_bigCar20190930",
    # "/users/maqiao/mq/Data_checked/multiClass/tricycle_bigCar20191011",
    # "/users/maqiao/mq/Data_checked/multiClass/tricycle_bigCar20191018",
    # "/users/maqiao/mq/Data_checked/multiClass/pucheng20191012",
    # "/users/maqiao/mq/Data_checked/multiClass/pucheng20191017",
    # "/users/maqiao/mq/Data_checked/multiClass/pucheng20191025",
    # "/users/maqiao/mq/Data_checked/multiClass/pucheng20191101"]
    # changsha_test_poly_nointer
    # /mnt/diskb/maqiao/multiClass/beijing20200110
    # /mnt/diskb/maqiao/multiClass/changsha20191224-2
    root_path = '/mnt/diskb/maqiao/multiClass/c5_puer_20200611'
    root_dirs = ["/mnt/diskb/maqiao/multiClass/c5_puer_20200611"]
    # root_path = '/users/duanyou/backup_c5/changsha_c5/test_new_chuiting'
    # rootdirs =  ["/users/duanyou/backup_c5/changsha_c5/test_new_chuiting"]
    # root_path = 'F:/mq1/test_data'
    # rootdirs  = [root_path+'/1']
    all_list_file = os.path.join(root_path, 'multiClass_train.txt')
    all_list = open(all_list_file, 'w')  # all_list_file is already a full path; joining again is redundant
    dir_num = len(root_dirs)
    for j, root_dir in enumerate(root_dirs):
        img_path = root_dir + '/' + "JPEGImages_ori"
        img_path_dst = root_dir + '/' + "JPEGImages"
        xml_path = root_dir + '/' + "Annotations"
        label_path = root_dir + '/' + "labels"

        if not os.path.exists(label_path):
            os.makedirs(label_path)
        if not os.path.exists(img_path_dst):
            os.makedirs(img_path_dst)

        list_file = open(root_dir + '/' + "train.txt", 'w')
        file_lists = os.listdir(img_path)
        file_num = len(file_lists)

        label_count = [0 for i in range(class_num)]
        for i, img_name in enumerate(file_lists):
            print("**************************************************************************************" +
                  str(i) + '/' + str(file_num) + '  ' + str(j) + '/' + str(dir_num))
            print(img_path + '/' + img_name)
            print(xml_path + '/' + img_name[:-4] + ".xml")

            if img_name.endswith('.jpg') and os.path.exists(xml_path + '/' + img_name[:-4] + ".xml"):
                if not os.path.exists(img_path):  # skip when there is no matching image
                    continue

                poly_non, boxes_non, label_statistics = convert_annotation(img_path, xml_path, label_path,
                                                                           img_name[:-4])
                print('boxes_non:', boxes_non)

                if label_statistics == []:
                    continue

                label_count = [label_count[i] + label_statistics[i]
                               for i in range(class_num)]

                img_ori = img_path + '/' + img_name
                img = cv2.imread(img_ori)
                if img is None:
                    continue

                # replace regions of no interest with randomly coloured blocks
                is_data_ok = True
                if len(boxes_non) > 0:
                    for b in boxes_non:
                        x_min = int(min(b[0], b[1]))
                        x_max = int(max(b[0], b[1]))
                        y_min = int(min(b[2], b[3]))
                        y_max = int(max(b[2], b[3]))

                        if x_max > img.shape[1] or y_max > img.shape[0]:
                            is_data_ok = False
                            break

                        if x_min < 0:
                            x_min = 0
                        if y_min < 0:
                            y_min = 0
                        if x_max > img.shape[1] - 1:
                            x_max = img.shape[1] - 1
                        if y_max > img.shape[0] - 1:
                            y_max = img.shape[0] - 1

                        h = int(y_max - y_min)
                        w = int(x_max - x_min)

                        # fill with mosaic noise
                        img[y_min:y_max, x_min:x_max, :] = np.random.randint(0, 255, (h, w, 3))

                # fill polygonal regions of no interest with black
                if len(poly_non) > 0:
                    for poly in poly_non:
                        arr = []
                        i = 0

                        while i < len(poly) - 1:
                            arr.append([int(poly[i]), int(poly[i + 1])])
                            i = i + 2

                        arr = np.array(arr)
                        print('arr:', arr)
                        cv2.fillPoly(img, [arr], 0)

                if not is_data_ok:
                    continue

                img_dst = img_path_dst + '/' + img_name

                # write out the preprocessed image
                print(img_dst)
                cv2.imwrite(img_dst, img)

                list_file.write(img_dst + '\n')
                all_list.write(img_dst + '\n')
            print("label_count ", label_count)

        list_file.close()
    all_list.close()
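The two ignore-region treatments above (random mosaic for boxes, black fillPoly for polygons) can be exercised standalone; a small sketch on a synthetic image with hypothetical regions:

import cv2
import numpy as np

img = np.full((100, 100, 3), 127, np.uint8)

# box region -> random mosaic, as in the loop above
x_min, x_max, y_min, y_max = 10, 30, 10, 30
img[y_min:y_max, x_min:x_max, :] = np.random.randint(
    0, 255, (y_max - y_min, x_max - x_min, 3), dtype=np.uint8)

# polygon region -> black fill
poly = np.array([[60, 60], [90, 60], [75, 90]], np.int32)
cv2.fillPoly(img, [poly], 0)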
def find_lane(img):
	global mtx, dist, x_thresh, y_thresh, s_thresh, v_thresh
	global flag_warp, M, M_inv, win_width, win_height, finder

	img_size = (img.shape[1], img.shape[0])

	### Undistort image ###	
	undistorted = cv2.undistort(img, mtx, dist, None, mtx)

	### Gradient and color thresholding ### 
	thresholded = threshold(undistorted, x_thresh, y_thresh, s_thresh, v_thresh)

	### Warp perspective ###
	if not flag_warp:
		M, M_inv = get_perspective_warp_matrices(img_size)
		flag_warp = True
	warped = cv2.warpPerspective(thresholded, M, img_size, flags=cv2.INTER_LINEAR)

	### Detect centroids of lane line segments ###
	# Use the finder instance to find centroids
	window_centroids = finder.get_smoothed_centroids(warped)
	# Draw diagnostic windows given the centroids
	#window_masked = draw_windows(window_centroids, warped)


	### Fit the centroids into lane lines ###
	# Points that are used to fit the left and right lanes
	left_x, right_x = [], []
	# Go through each level to add the centroids
	for level in range(0, len(window_centroids)):
		left_x.append(window_centroids[level][0])
		right_x.append(window_centroids[level][1])
	# Prepare y values
	yvals = range(0, warped.shape[0])
	res_yvals = np.arange(warped.shape[0]-(win_height/2), 0, -win_height)
	# Fit the lane line
	left_fit, left_fit_x = fit_line(res_yvals, left_x, yvals)
	right_fit, right_fit_x = fit_line(res_yvals, right_x, yvals)
	# Generate lane line boundary points
	left_lane = get_line_boundary_points(left_fit_x, yvals, win_width)
	right_lane = get_line_boundary_points(right_fit_x, yvals, win_width)
	# Generate inner lane boundary points
	inner_lane = get_area_boundary_points(left_fit_x, right_fit_x, yvals, win_width)


	### Warp lane line back to original perspective ###
	# Draw lane lines
	lanes = np.zeros_like(img)
	cv2.fillPoly(lanes, [left_lane], color=[255, 0, 0])
	cv2.fillPoly(lanes, [right_lane], color=[0, 0, 255])
	# Draw inner lane area
	cv2.fillPoly(lanes, [inner_lane], color=[0, 255, 0])
	# Make a lane background to accentuate the lanes
	lanes_bkg = np.zeros_like(img)
	cv2.fillPoly(lanes_bkg, [left_lane], color=[255, 255, 255])
	cv2.fillPoly(lanes_bkg, [right_lane], color=[255, 255, 255])
	# Warp back perspective
	lanes_warped_back = cv2.warpPerspective(lanes, M_inv, img_size, flags=cv2.INTER_LINEAR)
	lanes_bkg_warped_back = cv2.warpPerspective(lanes_bkg, M_inv, img_size, flags=cv2.INTER_LINEAR)
	# Draw the warped-back lanes on original image
	base = cv2.addWeighted(img, 1.0, lanes_bkg_warped_back, -1.0, 0.0)
	lanes_on = cv2.addWeighted(base, 1.0, lanes_warped_back, 0.7, 0.0)


	### Calculate the offset of the car from the center of lanes ###
	# Meters per pixel
	m_per_pix_y = 30./720
	m_per_pix_x = 3.7/570
	# Calculate lane center, i.e. the midpoint between left and right lanes
	lane_midpoint = (left_fit_x[-1] + right_fit_x[-1]) / 2
	# Calculate the offset between the lane center and car center
	# which is at the midpoint of image bottom
	car_offset = (lane_midpoint - warped.shape[1]/2) * m_per_pix_x
	# Get the car position relative to the lane center 
	side_pos = "left"
	if car_offset <= 0:
		side_pos = "right"


	### Calculate the curvature of the lane ###
	# Fit new polynomial to lane center in world space
	lane_center = (np.array(left_x) + np.array(right_x))/2
	fit_cr = np.polyfit(res_yvals*m_per_pix_y, lane_center*m_per_pix_x, 2)
	# Calculate radius of curvature
	y_eval = np.max(res_yvals)
	curve_rad = ((1 + (2*fit_cr[0]*y_eval*m_per_pix_y + fit_cr[1])**2)**1.5) / np.absolute(2*fit_cr[0])


	# Draw text information of lane curvature and car offset
	cv2.putText(lanes_on, "Radius of Curvature = " + str(round(curve_rad, 3)) + "(m)",
				(50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
	cv2.putText(lanes_on, "Vehicle is " + str(abs(round(car_offset, 3))) + "(m) " + side_pos + " of lane center",
				(50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
	
	# Write images
	#write_fname = "output_images/with_info_" + str(i) + '.jpg'
	#cv2.imwrite(write_fname, lanes_on)

	return lanes_on
Example #34
def draw_shape(image, shape, dims):
    """Draws a shape from the given specs."""
    # Get the center x, y and the size s
    x, y, s, angle = dims
    colour = (220, 220, 220)
    if shape == 'Baguette':
        #cv2.rectangle(image, (x-s, y-s), (x+s, y+s), colour, -1)

        x1 = -1 * s
        y1 = -0.55 * s
        x2 = s
        y2 = -0.55 * s
        x3 = s
        y3 = 0.55 * s
        x4 = -1 * s
        y4 = 0.55 * s

        x1a, y1a = rotate((x1, y1), angle)
        x2a, y2a = rotate((x2, y2), angle)
        x3a, y3a = rotate((x3, y3), angle)
        x4a, y4a = rotate((x4, y4), angle)

        #print("{0},{1} to {2},{3}".format(x1, y1, x1a, y1a))
        points = np.array([[(x1a, y1a), (x2a, y2a), (x3a, y3a), (x4a, y4a)]],
                          dtype=np.int32)

    elif shape == "Heart":

        x1 = 0.0 * s
        y1 = -0.817 * s
        x2 = -0.786 * s
        y2 = -0.573 * s
        x3 = -0.821 * s
        y3 = -0.153 * s
        x4 = -0.384 * s
        y4 = 0.668 * s
        x5 = 0.0 * s
        y5 = 1.0 * s
        x6 = 0.384 * s
        y6 = 0.668 * s
        x7 = 0.821 * s
        y7 = -0.153 * s
        x8 = 0.786 * s
        y8 = -0.573 * s
        x9 = 0.0 * s
        y9 = -0.817 * s

        x1a, y1a = rotate((x1, y1), angle)
        x2a, y2a = rotate((x2, y2), angle)
        x3a, y3a = rotate((x3, y3), angle)
        x4a, y4a = rotate((x4, y4), angle)
        x5a, y5a = rotate((x5, y5), angle)
        x6a, y6a = rotate((x6, y6), angle)
        x7a, y7a = rotate((x7, y7), angle)
        x8a, y8a = rotate((x8, y8), angle)
        x9a, y9a = rotate((x9, y9), angle)

        points = np.array([[(x1a, y1a), (x2a, y2a), (x3a, y3a), (x4a, y4a),
                            (x5a, y5a), (x6a, y6a), (x7a, y7a), (x8a, y8a),
                            (x9a, y9a)]],
                          dtype=np.int32)

    elif shape == "Round":

        x1 = 0.383 * s
        y1 = -0.924 * s
        x2 = -0.383 * s
        y2 = -0.924 * s
        x3 = -0.924 * s
        y3 = -0.383 * s
        x4 = -0.924 * s
        y4 = 0.383 * s
        x5 = -0.383 * s
        y5 = 0.924 * s
        x6 = 0.383 * s
        y6 = 0.924 * s
        x7 = 0.924 * s
        y7 = 0.383 * s
        x8 = 0.924 * s
        y8 = -0.383 * s
        x9 = 0.383 * s
        y9 = -0.924 * s

        x1a, y1a = rotate((x1, y1), angle)
        x2a, y2a = rotate((x2, y2), angle)
        x3a, y3a = rotate((x3, y3), angle)
        x4a, y4a = rotate((x4, y4), angle)
        x5a, y5a = rotate((x5, y5), angle)
        x6a, y6a = rotate((x6, y6), angle)
        x7a, y7a = rotate((x7, y7), angle)
        x8a, y8a = rotate((x8, y8), angle)
        x9a, y9a = rotate((x9, y9), angle)

        points = np.array([[(x1a, y1a), (x2a, y2a), (x3a, y3a), (x4a, y4a),
                            (x5a, y5a), (x6a, y6a), (x7a, y7a), (x8a, y8a),
                            (x9a, y9a)]],
                          dtype=np.int32)

    elif shape == "Princess":

        x1 = 0.85 * s
        y1 = -0.85 * s
        x2 = 1.0 * s
        y2 = 0.0 * s
        x3 = 0.85 * s
        y3 = 0.85 * s
        x4 = 0.0 * s
        y4 = 1.0 * s
        x5 = -0.85 * s
        y5 = 0.85 * s
        x6 = -1.0 * s
        y6 = 0.0 * s
        x7 = -0.85 * s
        y7 = -0.85 * s
        x8 = 0.0 * s
        y8 = -1.0 * s
        x9 = 0.85 * s
        y9 = -0.85 * s

        x1a, y1a = rotate((x1, y1), angle)
        x2a, y2a = rotate((x2, y2), angle)
        x3a, y3a = rotate((x3, y3), angle)
        x4a, y4a = rotate((x4, y4), angle)
        x5a, y5a = rotate((x5, y5), angle)
        x6a, y6a = rotate((x6, y6), angle)
        x7a, y7a = rotate((x7, y7), angle)
        x8a, y8a = rotate((x8, y8), angle)
        x9a, y9a = rotate((x9, y9), angle)

        points = np.array([[(x1a, y1a), (x2a, y2a), (x3a, y3a), (x4a, y4a),
                            (x5a, y5a), (x6a, y6a), (x7a, y7a), (x8a, y8a),
                            (x9a, y9a)]],
                          dtype=np.int32)

    elif shape == "Oval":

        x1 = 0.7 * s
        y1 = -0.5 * s
        x2 = 1.0 * s
        y2 = 0.0 * s
        x3 = 0.7 * s
        y3 = 0.5 * s
        x4 = 0.0 * s
        y4 = 0.7 * s
        x5 = -0.7 * s
        y5 = 0.5 * s
        x6 = -1.0 * s
        y6 = 0.0 * s
        x7 = -0.7 * s
        y7 = -0.5 * s
        x8 = 0.0 * s
        y8 = -0.7 * s
        x9 = 0.7 * s
        y9 = -0.5 * s

        x1a, y1a = rotate((x1, y1), angle)
        x2a, y2a = rotate((x2, y2), angle)
        x3a, y3a = rotate((x3, y3), angle)
        x4a, y4a = rotate((x4, y4), angle)
        x5a, y5a = rotate((x5, y5), angle)
        x6a, y6a = rotate((x6, y6), angle)
        x7a, y7a = rotate((x7, y7), angle)
        x8a, y8a = rotate((x8, y8), angle)
        x9a, y9a = rotate((x9, y9), angle)

        points = np.array([[(x1a, y1a), (x2a, y2a), (x3a, y3a), (x4a, y4a),
                            (x5a, y5a), (x6a, y6a), (x7a, y7a), (x8a, y8a),
                            (x9a, y9a)]],
                          dtype=np.int32)

    point_shape = points.shape
    #print(point_shape)
    offset = np.zeros((1, 0, 2), dtype=np.int32)

    for i in range(point_shape[1]):
        add = np.array([[(x, y)]])
        #print(add.shape)
        offset = np.append(offset, add)

    shaped_offset = offset.reshape(point_shape)
    #add offset
    new_points = points + shaped_offset

    #draw dark background circle
    cv2.circle(image, (x, y), int(s * 1.2), (0, 0, 0), -1)
    cv2.fillPoly(image, new_points, colour)
    return image
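The offset-building loop at the end creates an array the same shape as points just to add the center; NumPy broadcasting does the same in one line, as a sketch:

import numpy as np

points = np.array([[(0, 0), (10, 0), (5, 8)]], np.int32)  # shape (1, N, 2)
x, y = 50, 60
new_points = points + np.array([x, y], np.int32)  # broadcast over every vertex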
Example #35
    def blurImage(self, imagePath):

        try:

            # Create opencv image
            spaceImage = cv2.imread(self.spaceImagePath)
            cvImage = cv2.imread(imagePath)

            # Dimensions
            #height = np.size(cvImage, 0)
            #width = np.size(cvImage, 1)

            # For now, use static points for blurring
            blurAreaPoints = [
                [0, 0],
                [0, 284],
                [67, 217],
                [100, 144],
                [178, 128],
                [240, 40],
                [336, 28],
                [418, 0],
            ]
            blurredAreaPoints = np.array(blurAreaPoints, dtype=np.int32)

            #create a mask template
            srcMask = spaceImage.copy()
            srcMask = cv2.cvtColor(srcMask, cv2.COLOR_BGR2GRAY)
            srcMask.fill(0)

            # White polyfill the mask by blur points
            cv2.fillPoly(srcMask, [blurredAreaPoints], 255)

            # Bounding box of the blur polygon, reused for all slicing below
            y0, y1 = np.min(blurredAreaPoints[:, 1]), np.max(blurredAreaPoints[:, 1])
            x0, x1 = np.min(blurredAreaPoints[:, 0]), np.max(blurredAreaPoints[:, 0])

            # Create region of interest, of the image, of the blur points
            roi = cvImage[y0:y1, x0:x1]
            mask = srcMask[y0:y1, x0:x1]

            # Cut and paste and combine
            invertedMask = cv2.bitwise_not(mask)
            spaceImageBackground = cv2.bitwise_and(roi, roi, mask=invertedMask)
            spaceImageCuttedPart = spaceImage[y0:y1, x0:x1]
            foregroundPart = cv2.bitwise_and(spaceImageCuttedPart,
                                             spaceImageCuttedPart,
                                             mask=mask)

            # Combine space triangle part and wanted coffee part, save as a final master piece
            destination = cv2.add(spaceImageBackground, foregroundPart)
            cvImageFinal = cvImage.copy()
            cvImageFinal[y0:y1, x0:x1] = destination

            # Write resulting images
            cv2.imwrite(imagePath, cvImageFinal)

        except Exception as e:
            print(e)
Example #36
# capture and de-rotate the image section corresponding to the bounding rect
# we use this if as depending on the orientation the angle has different meaning
if width[1] > width[0]:
    sub = subimage(threshed2, center,
                   ((90 + add_angle + angle) * math.pi) / 180, int(width[1]),
                   int(width[0]))
else:
    sub = subimage(threshed2, center, ((add_angle + angle) * math.pi) / 180,
                   int(width[0]), int(width[1]))

# grab the 4 corners of the rect for drawing
box = cv2.cv.BoxPoints((center, width, angle))
box = np.int0(box)
cv2.drawContours(img, [box], 0, 0, 2)
'''
# masking - UNUSED
# http://stackoverflow.com/questions/15341538/numpy-opencv-2-how-do-i-crop-non-rectangular-region
mask = np.zeros(img.shape, dtype=np.uint8)
mask[:] = 255
roi_corners = np.array([box], dtype=np.int32)
black = (0, 0, 0)
cv2.fillPoly(mask, roi_corners, black)
# apply the mask
masked_image = cv2.bitwise_or(threshed2, mask)
'''
# the region of interest is output
cv2.imwrite('target.png', sub)

# show images for debugging
cv2.imshow('dni', img)
def draw_lane(img,lane):
	histogram = np.sum(img,axis = 0)
	midpoint = int(histogram.shape[0]/2)
	leftx_base = np.argmax(histogram[:midpoint])
	rightx_base = np.argmax(histogram[midpoint:]) + midpoint


	nwindows = 9
	window_height = int(img.shape[0]/nwindows)
	# Identify the x and y positions of all nonzero pixels in the image
	nonzero = img.nonzero()
	nonzeroy = np.array(nonzero[0])
	nonzerox = np.array(nonzero[1])
	# Current positions to be updated for each window

	leftx_current = leftx_base
	rightx_current = rightx_base

	draw_windows = True
	margin = 150
	minpix = 1
	left_lane_inds = []
	right_lane_inds = []

	left_a, left_b, left_c = [],[],[]
	right_a, right_b, right_c = [],[],[]


	for window in range(nwindows):
		win_y_low = img.shape[0] - (window+1)*window_height
		win_y_high = img.shape[0] - window*window_height
		win_xleft_low = leftx_current - margin
		win_xleft_high = leftx_current + margin
		win_xright_low = rightx_current - margin
		win_xright_high = rightx_current + margin
		# Draw the windows on the visualization image
		if draw_windows:
			cv2.rectangle(img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),
			(100,255,255), 3) 
			cv2.rectangle(img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),
			(100,255,255), 3) 
		
		good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & 
		(nonzerox >= win_xleft_low) &  (nonzerox < win_xleft_high)).nonzero()[0]
		good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & 
		(nonzerox >= win_xright_low) &  (nonzerox < win_xright_high)).nonzero()[0]
		# Append these indices to the lists
		left_lane_inds.append(good_left_inds)
		right_lane_inds.append(good_right_inds)
		# If you found > minpix pixels, recenter next window on their mean position
		if len(good_left_inds) > minpix:
			leftx_current = int(np.mean(nonzerox[good_left_inds]))
		if len(good_right_inds) > minpix:
			rightx_current = int(np.mean(nonzerox[good_right_inds]))
		
	# show(img)


	left_lane_inds = np.concatenate(left_lane_inds)
	right_lane_inds = np.concatenate(right_lane_inds)

	# Extract left and right line pixel positions
	leftx = nonzerox[left_lane_inds]
	lefty = nonzeroy[left_lane_inds] 
	rightx = nonzerox[right_lane_inds]
	righty = nonzeroy[right_lane_inds] 

	# Fit a second order polynomial to each
	left_fit = np.polyfit(lefty, leftx, 2)
	right_fit = np.polyfit(righty, rightx, 2)

	ploty = np.linspace(0, img.shape[0]-1, img.shape[0] )
	left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
	right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]

	color_img = np.zeros_like(lane)



	left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
	right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])

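	# Temporal smoothing (module-level rpnts/lpnts lists assumed): average the
	# current lane boundary with the boundaries from the two previous frames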
	if len(rpnts)<2:
		rpnts.append(right)
		lpnts.append(left)
	if len(rpnts)==2:
		right = (right+rpnts[0]+rpnts[1])/3
		left = (left+lpnts[0]+lpnts[1])/3
		rpnts[0] = rpnts[1]
		rpnts[1] = right
		lpnts[0] = lpnts[1]
		lpnts[1] = left

	points = np.hstack((left, right))


	for pt in left[0]:
		cv2.circle(color_img ,(int(pt[0]),int(pt[1])), 7, (10,200,10), -1)
	for pt in right[0]:
		cv2.circle(color_img ,(int(pt[0]),int(pt[1])), 7, (10,200,10), -1)

	cv2.fillPoly(color_img, np.int_(points), (255,10,10))
	h,w,c = np.shape(lane)
	inv = inv_perspective_warp(color_img,dst_size=(w,h))
	img = cv2.addWeighted(inv, 0.5,lane,1,0)

	return img
    def colorwarp(self,
                  undistort,
                  leftx,
                  lefty,
                  rightx,
                  righty,
                  llane=None,
                  rlane=None):
        #def colorwarp(self, undistort, leftx, lefty, rightx, righty):
        # Quadratic fit coefficients

        # Left Lane Line
        l_fit = None
        if len(leftx) == 0:
            if llane is not None:
                l_fit = llane.fit_get()
            if l_fit is None and rlane is not None:
                l_fit = rlane.fit_get()
                if l_fit is not None:
                    l_fit[2] -= 540
        else:
            l_fit = np.polyfit(lefty, leftx, 2)

        # when 'Left' lane line tracked
        if llane is not None and l_fit is not None:
            # GET 'l_fit' from cache instead if it is considered invalid
            if not llane.fit_valid(l_fit):
                l_fit = llane.fit_get()

        if l_fit is None:
            return undistort

        # Right Lane Line
        r_fit = None
        if len(rightx) == 0:
            if rlane is not None:
                r_fit = rlane.fit_get()
            if r_fit is None and llane is not None:
                r_fit = llane.fit_get()
                if r_fit is not None:
                    r_fit[2] += 540
        else:
            r_fit = np.polyfit(righty, rightx, 2)

        # when 'Right' lane line tracked
        if rlane is not None and r_fit is not None:
            # GET 'r_fit' from cache instead if it is considered invalid
            if not rlane.fit_valid(r_fit):
                r_fit = rlane.fit_get()

        if r_fit is None:
            return undistort

        # Smooth L and R
        smooth_lr = True
        if smooth_lr:
            l_fit_s = llane.fit_get() if llane is not None else None
            if l_fit_s is not None:
                l_fit = (l_fit + l_fit_s) / 2.0

            r_fit_s = rlane.fit_get() if rlane is not None else None
            if r_fit_s is not None:
                r_fit = (r_fit + r_fit_s) / 2.0

        # N.B. From now on:
        # Fitted Lane lines (L/R) are considered smooth (noise rejected)!

        # balance_lr = True
        # if balance_lr:
        #
        # N.B. Balancing L/R curvature proved less useful on video because the
        #      Left and Right line instances update independently; their
        #      asynchronous smoothing can make balancing ineffective, so it is
        #      disabled in this function!
        #
        # Averaging the high-order polynomial coefficients is important:
        # it yields parallel Left/Right lane-line curves.
        m_fit = (l_fit + r_fit) / 2.
        self.q_fit.append(m_fit)

        m_fit = sum(self.q_fit) / len(self.q_fit)

        # Generate Lane line points to both Left and Right
        yvals = np.arange(self.h)  # shrink
        l_fitx = m_fit[0] * yvals**2 + m_fit[1] * yvals + m_fit[
            2] - 100  # center
        r_fitx = l_fitx + 200  #  band

        # Calculate Lane lines Curvature Radius and Center Departure
        rad, dev, head = self.curvature_centeroffs(l_fit, leftx, lefty, r_fit,
                                                   rightx, righty)
        self.q_RDH.append(np.array([rad, dev, head]))
        m_RDH = sum(self.q_RDH) / len(self.q_RDH)
        m_rad = m_RDH[0]
        m_dev = m_RDH[1]
        m_head = m_RDH[2]

        # populate message members
        self.lane_obst.radius = m_rad
        self.lane_obst.centerdev = m_dev
        self.lane_obst.anglerror = m_head

        (h, w) = (self.h, self.w)
        warp_zero = np.zeros((h, w), np.uint8)
        color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

        # Recast the fitted x and y points into usable format for cv2.fillPoly()
        pts_l = np.array([np.transpose(np.vstack([l_fitx, yvals]))])
        pts_r = np.array([np.flipud(np.transpose(np.vstack([r_fitx, yvals])))])
        pts = np.hstack((pts_l, pts_r))

        # Draw the lane onto the warped blank image in GREEN
        cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

        # Warp the birdeye view color_warp back to original image space using inverse perspective matrix (Minv)
        newwarp = cv2.warpPerspective(color_warp, self.Minv,
                                      (warp_zero.shape[1], warp_zero.shape[0]))

        # Combine the newwarp with the previous undistorted image
        result = cv2.addWeighted(undistort, 1, newwarp, 0.1, 0)
        # And add text annotations
        cv2.putText(result,"Curvature Radius: " + "{0:8.2f}".format(m_rad) + "  (m)", (200,30), \
                    cv2.FONT_HERSHEY_DUPLEX, 0.6, (255, 255, 255), 1)
        cv2.putText(result,"Center Departure: " + "{0:8.2f}".format(m_dev) + " (cm)", (200,60), \
                    cv2.FONT_HERSHEY_DUPLEX, 0.6, (255, 255, 255), 1)
        cv2.putText(result,"Heading Angle(o): " + "{0:8.2f}".format(m_head) + " (deg)", (200,90), \
                    cv2.FONT_HERSHEY_DUPLEX, 0.6, (255, 255, 255), 1)
        return result
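# The q_fit queue above keeps a short history of [A, B, C] coefficients and
# averages them; a minimal standalone sketch of that smoothing idea, assuming
# a bounded deque of length 5 (the real queue length is not shown here):
from collections import deque

import numpy as np

_q_fit = deque(maxlen=5)


def smooth_fit(new_fit):
    # element-wise running mean over the most recent coefficient triples
    _q_fit.append(np.asarray(new_fit, dtype=float))
    return sum(_q_fit) / len(_q_fit)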
def region_of_interest(img, vertices):
    mask = np.zeros_like(img)
    match_mask_color = 255
    cv2.fillPoly(mask, vertices, match_mask_color)
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
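# A quick usage sketch for region_of_interest above (illustrative values;
# assumes numpy/cv2 are already imported as elsewhere in these snippets):
# gray_img = np.full((540, 960), 255, dtype=np.uint8)
# triangle = np.array([[(0, 540), (480, 300), (960, 540)]], dtype=np.int32)
# roi = region_of_interest(gray_img, triangle)  # pixels outside the triangle become 0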
Example #40
0
    # Polygon drawing
    # Create a blank white background
    img = np.full((height, width, 1), 255, dtype=np.uint8)

    # Create the black-filled fat region (polygon)
    outpts = []
    outbaseangle = int(360/outpoints)
    angle = 0 - rand()*outbaseangle*0.2
    while angle < 360:
        angle = angle + outbaseangle
        rad = math.radians(angle)
        r = (outrangev + rand()*(outrangeh - outrangev))/2
        outpts.append([outcenterx + r*np.cos(rad), outcentery + r*np.sin(rad)])
        angle = angle - rand()*outbaseangle*0.2
    pts = np.array(outpts).reshape((-1,1,2)).astype(np.int32)
    img = cv2.fillPoly(img, [pts], color=0)

    # Create the black-filled fat region (ellipse)
    img = cv2.ellipse(img, ((outcenterx,outcentery), (outrangev, outrangeh), outangle), 0, thickness=-1)
    

    # Create the white-filled loin-eye polygon 1
    loin1pts = []
    loin1baseangle = int(360/loin1points)
    angle = 0 - rand()*loin1baseangle*0.2
    while angle < 360:
        angle = angle + loin1baseangle
        rad = math.radians(angle)
        r = (loin1rangev + rand()*(loin1rangeh - loin1rangev))/2
        loin1pts.append([loin1centerx + r*np.cos(rad), loin1centery + r*np.sin(rad)])
        angle = angle - rand()*loin1baseangle*0.2
Example #41
0
     [997.0439560439561, 342.85714285714283],
     [1027.8131868131868, 335.16483516483515], [1041.0, 334.0659340659341],
     [1057.4835164835165, 346.15384615384613],
     [1069.5714285714284, 365.9340659340659],
     [1062.9780219780218, 397.8021978021978],
     [1049.7912087912086, 409.8901098901099],
     [1025.6153846153845, 428.57142857142856],
     [995.9450549450548, 427.4725274725275],
     [967.3736263736264, 435.16483516483515],
     [957.4835164835165, 447.2527472527472],
     [953.087912087912, 460.4395604395604],
     [947.5934065934066, 474.7252747252747],
     [947.5934065934066, 480.2197802197802],
     [954.1868131868132, 489.010989010989]], np.int32)

cv2.fillPoly(src_mask, [poly_1], (255, 255, 255))

# This is where the airplane's CENTER is placed
center = (700, 400)

# Clone originally
# np.zeros(np.shape(img0), dtype=np.uint8)
# output_ORIGIN = cv2.add(src, dst, mask=src_mask)
# cv2.imwrite("images/original.jpg", output_ORIGIN)

# Clone seamlessly.
output_NORMAL = cv2.seamlessClone(src, dst, src_mask, center, cv2.NORMAL_CLONE)
output_MIXED = cv2.seamlessClone(src, dst, src_mask, center, cv2.MIXED_CLONE)
cv2.imwrite("images/normal.jpg", output_NORMAL)
cv2.imwrite("images/mixed.jpg", output_NORMAL)
Example #42
0
def draw_mask(polygon: list, mask: np.ndarray, val: int):
    area = np.array([polygon])
    # mask = cv2.merge([mask,mask,mask])
    cv2.fillPoly(mask, area, val)
    return mask
Example #43
0
              color=(255, 0, 0),
              thickness=10)
plt.imshow(fix_img)

#  Draw a BLUE TRIANGLE in the middle of the image
plt.figure(5)
fix_img2 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
vertices = np.array([[200, 730], [600, 730], [390, 350]], dtype=np.int32)
pts = vertices.reshape((-1, 1, 2))
cv2.polylines(fix_img2, [pts], isClosed=True, color=(0, 0, 255), thickness=10)
plt.imshow(fix_img2)

# fill in this triangle
plt.figure(6)
fix_img3 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
cv2.fillPoly(fix_img3, [pts], color=(0, 0, 255))
plt.imshow(fix_img3)

# plt.show()


# Create a script that opens the picture and allows you to draw empty red circles with RIGHT MOUSE BUTTON DOWN click
def draw_circle(event, x, y, flags, params):

    if event == cv2.EVENT_RBUTTONDOWN:
        cv2.circle(img, (x, y), 100, (0, 0, 255), 10)


cv2.namedWindow(winname='Puppy')

cv2.setMouseCallback('Puppy', draw_circle)
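# The callback above never fires without a display loop; a minimal one,
# assuming 'img' was loaded earlier (e.g. with cv2.imread):
while True:
    cv2.imshow('Puppy', img)
    if cv2.waitKey(1) & 0xFF == 27:  # Esc to quit
        break
cv2.destroyAllWindows()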
        dstPts= np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1,1,2)

        matrix, mask= cv2.findHomography(srcPts,dstPts,cv2.RANSAC,5)
        print(matrix)

        # find the box that we are going to plot
        pts=np.float32([[0,0],[0,hT],[wT,hT],[wT,0]]).reshape(-1,1,2)
        dst = cv2.perspectiveTransform(pts,matrix)
        img2= cv2.polylines(imgWebcam,[np.int32(dst)],True,(255,0,255),3)

        #
        imgWarp = cv2.warpPerspective(imgVideo, matrix, (img2.shape[1], img2.shape[0]))

        # create a mask
        maskNew = np.zeros((imgAug.shape[0],imgAug.shape[1]),np.uint8)
        cv2.fillPoly(maskNew,[np.int32(dst)],(255,255,255))

        maskInv = cv2.bitwise_not(maskNew) 
        imgAug = cv2.bitwise_and(imgAug, imgAug, mask=maskInv)
        # add the video inside the black contour
        imgAug = cv2.bitwise_or(imgWarp, imgAug) 

        # stackImages() would return everything stacked in a single viewer
        #imgStacked= stackImages(([imgWebcam,imgVideo,imgTarget],[imgFeatures,imgWarp,imgAug]),0.1)

    cv2.imshow('imgAug',imgAug)
    #cv2.imshow('imgWarp',imgWarp)
    #cv2.imshow('img2',img2)
    #cv2.imshow('imgFeatures',imgFeatures)
    #cv2.imshow('ImgTarget',imgTarget)
    #cv2.imshow('ImgVideo',imgVideo)
# generate masks (gt) from .xml files
import xml.etree.ElementTree as ET
import cv2
import numpy as np
import glob

for filename in glob.glob('TestSet-Annotations/*.xml'):

    tree = ET.parse(filename)
    root = tree.getroot()

    nrows = int(root.find('imagesize')[0].text)
    ncols = int(root.find('imagesize')[1].text)
    mask = np.zeros((nrows, ncols), dtype=np.uint8)

    for obj in root.findall('object'):
        for polygon in obj.findall('polygon'):
            verList = []
            for pt in polygon.findall('pt'):
                x = pt.find('x').text
                y = pt.find('y').text
                verList.append((x, y))
            verList = np.array(verList, np.int32)
            cv2.fillPoly(mask, [verList], 255)

    cv2.imwrite('TestSet-Masks/' + filename[20:-4] + '_masks.jpg', mask)
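# For reference, a minimal annotation sketch matching the tags this parser
# reads (LabelMe-style; tag names inferred from the code, real files may differ):
#
# <annotation>
#   <imagesize><nrows>480</nrows><ncols>640</ncols></imagesize>
#   <object>
#     <polygon>
#       <pt><x>10</x><y>20</y></pt>
#       <pt><x>60</x><y>20</y></pt>
#       <pt><x>35</x><y>70</y></pt>
#     </polygon>
#   </object>
# </annotation>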
def process_image(image):
    # NOTE: The output you return should be a color image (3 channel) for processing video below
    # TODO: put your pipeline here,
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    #print('This image is ', type(image), ' with dimensions', image.shape)
    # First, do a gaussian filtering to remove noise
    kernel_size = 9
    blur_gray = cv2.GaussianBlur(gray, (kernel_size, kernel_size), 0)
    # Then, do Canny edge detection
    low_threshold = 50
    high_threshold = 150
    edges = cv2.Canny(blur_gray, low_threshold, high_threshold)
    # Next we'll create a masked edges image using cv2.fillPoly()
    mask = np.zeros_like(edges)
    ignore_mask_color = 255
    # This time we are defining a four sided polygon to mask
    imshape = image.shape
    vertices = np.array([[(0, imshape[0]),
                          (imshape[1] * 1 / 4, imshape[0] * 9 / 16),
                          (imshape[1] * 3 / 4, imshape[0] * 9 / 16),
                          (imshape[1], imshape[0])]],
                        dtype=np.int32)
    cv2.fillPoly(mask, vertices, ignore_mask_color)
    edges = cv2.bitwise_and(edges, mask)
    #plt.imshow(edges)
    #plt.show()
    slope_threshold = 0.5
    # Define the Hough transform parameters for large lines
    # Make a blank the same size as our image to draw on
    rho = 2  # distance resolution in pixels of the Hough grid
    theta = np.pi / 180  # angular resolution in radians of the Hough grid
    threshold = 70  # minimum number of votes (intersections in Hough grid cell)
    min_line_length = 50  #minimum number of pixels making up a line
    max_line_gap = 5  # maximum gap in pixels between connectable line segments
    #line_image = np.copy(image)*0 # creating a blank to draw lines on
    # Run Hough on edge detected image
    # Output "lines" is an array containing endpoints of detected line segments
    lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]),
                            min_line_length, max_line_gap)
    # Iterate over the output "lines" and draw lines on a blank image
    #remove detected lines from edges
    image2show = np.copy(image)
    for line in lines:
        for x1, y1, x2, y2 in line:
            slope = (float(y2) - float(y1)) / (float(x2) - float(x1))
            #print(slope)
            if (slope > slope_threshold or slope < -slope_threshold):
                cv2.line(image2show, (x1, y1), (x2, y2), (0, 0, 255), 3)
                cv2.line(edges, (x1, y1), (x2, y2), (0, 0, 0), 3)

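    # NOTE: returning here makes the dotted-line second pass below unreachable;
    # the early return appears to disable it deliberately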
    result = image2show
    return result
    # Create a "color" binary image to combine with line image
    #color_edges = np.dstack((edges, edges, edges))
    # Define the Hough transform parameters for dotted lines
    # Make a blank the same size as our image to draw on
    rho = 2  # distance resolution in pixels of the Hough grid
    theta = np.pi / 180  # angular resolution in radians of the Hough grid
    threshold = 50  # minimum number of votes (intersections in Hough grid cell)
    min_line_length = 100  #minimum number of pixels making up a line
    max_line_gap = 50  # maximum gap in pixels between connectable line segments
    #line_image = np.copy(image)*0 # creating a blank to draw lines on
    # Run Hough on edge detected image
    # Output "lines" is an array containing endpoints of detected line segments
    lines_dotted = []
    lines_dotted = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]),
                                   min_line_length, max_line_gap)
    # Iterate over the output "lines" and draw lines on a blank image
    if lines_dotted is not None:
        #print(lines_dotted)
        #if true
        for line in lines_dotted:
            for x1, y1, x2, y2 in line:
                slope = (y2 - y1) / (x2 - x1)
                #print slope
                if (slope > slope_threshold or slope < -slope_threshold):
                    cv2.line(image2show, (x1, y1), (x2, y2), (0, 0, 0), 3)
    # Create a "color" binary image to combine with line image
    #color_edges = np.dstack((edges, edges, edges))
    # Draw the lines on the edge image
    #lines_edges = cv2.addWeighted(color_edges, 0.8, line_image, 1, 0)
    #plt.imshow(image2show)
    #plt.show()
    # you should return the final output (image where lines are drawn on lanes)
    result = image2show
    return result
def fit_lines(binary_warped):
    global left
    global right
    
    # Assuming you have created a warped binary image called "binary_warped"
    # Take a histogram of the bottom half of the image
    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
    # Create an output image to draw on and  visualize the result
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    # Find the peak of the left and right halves of the histogram
    # These will be the starting point for the left and right lines
    midpoint = int(histogram.shape[0]/2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    
    # Choose the number of sliding windows
    nwindows = 9
    # Set height of windows
    window_height = int(binary_warped.shape[0]/nwindows)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions to be updated for each window
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Set the width of the windows +/- margin
    margin = 100
    # Set minimum number of pixels found to recenter window
    minpix = 50
    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []
    
    # Step through the windows one by one
    for window in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw the windows on the visualization image
        cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2) 
        cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2) 
        # Identify the nonzero pixels in x and y within the window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If you found > minpix pixels, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    
    # Concatenate the arrays of indices
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds] 
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds] 

    # Define conversions in x and y from pixels space to meters
    ym_per_pix = 30/720 # meters per pixel in y dimension
    xm_per_pix = 3.7/700 # meters per pixel in x dimension
    
    # Fit a second order polynomial to each set of lane pixels.
    # Do the fit in pixels and then again in meters for real world space
    # If no lane points were found use last successful fit from previous frame.
    if leftx.shape[0] > 0:
        left.fit = np.polyfit(lefty, leftx, 2)
        left.fit_cr = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, 2)
        left.no_fit_count = 0
    else:
        left.fit = left.last_fit    # use last successful fit from previous frame
        left.fit_cr = left.last_fit_cr
        left.no_fit_count += 1
        
    if rightx.shape[0] > 0:
        right.fit = np.polyfit(righty, rightx, 2)
        right.fit_cr = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, 2)
        right.no_fit_count = 0
    else:
        right.fit = right.last_fit  # use last successful fit from previous frame
        right.fit_cr = right.last_fit_cr
        right.no_fit_count += 1
        
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
    y_eval = np.max(ploty) * ym_per_pix

    # Calculate radius of curvature, in meters, of the detected lines
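    # For a fit x = A*y^2 + B*y + C, the curvature radius is
    #   R = (1 + (2*A*y + B)**2)**1.5 / abs(2*A)
    # evaluated here at y_eval (the bottom of the image, in meters)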
    left.radius_of_curvature = ((1 + (2*left.fit_cr[0]*y_eval + left.fit_cr[1])**2)**1.5) / np.absolute(2*left.fit_cr[0])
    right.radius_of_curvature = ((1 + (2*right.fit_cr[0]*y_eval + right.fit_cr[1])**2)**1.5) / np.absolute(2*right.fit_cr[0])

    # Add sanity check of the lines here.  If any sanity check fails revert to last good frame
    # - Check that they have similar curvature
    # - TODO: Check that they are separated by approximately the right distance horizontally
    # - TODO: Check that they are roughly parallel
    if (((right.radius_of_curvature/left.radius_of_curvature) < 0.1)
        | ((right.radius_of_curvature/left.radius_of_curvature) > 10.0)):
#        print("\r", left.radius_of_curvature,right.radius_of_curvature)
        left.fit = left.last_fit
        left.fit_cr = left.last_fit_cr
        left.radius_of_curvature = ((1 + (2*left.fit_cr[0]*y_eval + left.fit_cr[1])**2)**1.5) / np.absolute(2*left.fit_cr[0]) 
        right.fit = right.last_fit
        right.fit_cr = right.last_fit_cr
        right.radius_of_curvature = ((1 + (2*right.fit_cr[0]*y_eval + right.fit_cr[1])**2)**1.5) / np.absolute(2*right.fit_cr[0]) 
           
    # Generate x and y values for plotting
    left.fitx = left.fit[0]*ploty**2 + left.fit[1]*ploty + left.fit[2]
    right.fitx = right.fit[0]*ploty**2 + right.fit[1]*ploty + right.fit[2]

    # Save last good fit data
    left.last_fit = left.fit
    left.last_fit_cr = left.fit_cr
    right.last_fit = right.fit
    right.last_fit_cr = right.fit_cr

    # show intermediate result if desired
    if 0:    
        # Create an image to draw on and an image to show the selection window
        out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
        window_img = np.zeros_like(out_img)
        # Color in left and right line pixels
        out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
        out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
 
        # Generate a polygon to illustrate the search window area
        # And recast the x and y points into usable format for cv2.fillPoly()
        left_line_window1 = np.array([np.transpose(np.vstack([left.fitx-margin, ploty]))])
        left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left.fitx+margin, ploty])))])
        left_line_pts = np.hstack((left_line_window1, left_line_window2))
        right_line_window1 = np.array([np.transpose(np.vstack([right.fitx-margin, ploty]))])
        right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right.fitx+margin, ploty])))])
        right_line_pts = np.hstack((right_line_window1, right_line_window2))
      
        left_line_line1 = np.array([np.transpose(np.vstack([left.fitx-1, ploty]))])
        left_line_line2 = np.array([np.flipud(np.transpose(np.vstack([left.fitx+1, ploty])))])
        left_line_line_pts = np.hstack((left_line_line1, left_line_line2))
        right_line_line1 = np.array([np.transpose(np.vstack([right.fitx-1, ploty]))])
        right_line_line2 = np.array([np.flipud(np.transpose(np.vstack([right.fitx+1, ploty])))])
        right_line_line_pts = np.hstack((right_line_line1, right_line_line2))
      
        # Draw the lane onto the warped blank image
        cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
        cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
        result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
      
        window_img = np.zeros_like(out_img)
        cv2.fillPoly(window_img, np.int_([left_line_line_pts]), (255, 255, 0))
        cv2.fillPoly(window_img, np.int_([right_line_line_pts]), (255, 255, 0))
        result = cv2.addWeighted(result, 1, window_img, 1.0, 0)
          
        plt.imshow(result)
        plt.plot(left.fitx, ploty, color='yellow')
        plt.plot(right.fitx, ploty, color='yellow')
        plt.xlim(0, 1280)
        plt.ylim(720, 0)
        plt.show()

    return ploty, left, right
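# A minimal sketch of the two TODO sanity checks above (thresholds are
# illustrative assumptions, not tuned values); xm_per_pix matches the
# 3.7/700 conversion used inside fit_lines:
import numpy as np


def lines_plausible(left_fitx, right_fitx, xm_per_pix=3.7 / 700):
    sep = (np.asarray(right_fitx) - np.asarray(left_fitx)) * xm_per_pix
    ok_distance = 2.5 < np.mean(sep) < 5.0  # roughly one lane width apart
    ok_parallel = np.std(sep) < 0.7  # near-constant separation => roughly parallel
    return ok_distance and ok_parallel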
Example #48
0
def generate_rbox(im_size, polys, tags):
    h, w = im_size

    # logger.debug("start generating rbox data: h:%d, w:%d", h, w)

    # initialize 3 masks, all 512x512
    poly_mask = np.zeros((h, w), dtype=np.uint8)
    score_map = np.zeros((h, w), dtype=np.uint8)
    geo_map = np.zeros((h, w, 5), dtype=np.float32)
    # mask used during traning, to ignore some hard areas
    training_mask = np.ones((h, w), dtype=np.uint8)

    # polys.shape => [num_boxes, 4, 2]
    for poly_idx, poly_tag in enumerate(zip(polys, tags)):
        poly = poly_tag[0]
        tag = poly_tag[1]

        r = [None, None, None, None]
        for i in range(4):
            # np.linalg.norm (vector norm): https://blog.csdn.net/hqh131360239/article/details/79061535
            # here the norm is just the distance between box vertices:
            # r[i] is the smaller of the distances from vertex i to its two neighbors
            r[i] = min(np.linalg.norm(poly[i] - poly[(i + 1) % 4]),
                       np.linalg.norm(poly[i] - poly[(i - 1) % 4]))
        # score map
        shrinked_poly = shrink_poly(poly.copy(),
                                    r).astype(np.int32)[np.newaxis, :, :]
        cv2.fillPoly(score_map, shrinked_poly, 1)
        cv2.fillPoly(poly_mask, shrinked_poly, poly_idx + 1)  # label each shrunk poly with its 1-based index

        # if the poly is too small, then ignore it during training
        # this is the purpose of training_mask: it controls which pixels
        # are excluded from training (boxes whose width or height is too small)
        poly_h = min(np.linalg.norm(poly[0] - poly[3]),
                     np.linalg.norm(poly[1] - poly[2]))
        poly_w = min(np.linalg.norm(poly[0] - poly[1]),
                     np.linalg.norm(poly[2] - poly[3]))
        # if min(poly_h, poly_w) < FLAGS.min_text_size:
        #     logger.debug("the text box's shortest side is below %d (h=%d,w=%d); mask this box out", FLAGS.min_text_size, poly_h, poly_w)
        #     cv2.fillPoly(training_mask,  poly.astype(np.int32)[np.newaxis, :, :],    0)

        # if the box is tagged as hard/ambiguous, exclude it from training
        if tag:
            logger.debug("the text box is ambiguous; mask this box out")
            cv2.fillPoly(training_mask,
                         poly.astype(np.int32)[np.newaxis, :, :], 0)

        # np.argwhere returns the indices of elements satisfying the condition:
        # here, the (y, x) coordinates of the pixels belonging to this poly
        xy_in_poly = np.argwhere(poly_mask == (poly_idx + 1))

        # if geometry == 'RBOX':
        # 对任意两个顶点的组合生成一个平行四边形 - generate a parallelogram for any combination of two vertices
        fitted_parallelograms = []
        for i in range(4):
            # the 4 vertices
            p0 = poly[i]  # poly.shape [4,2]
            p1 = poly[(i + 1) % 4]
            p2 = poly[(i + 2) % 4]
            p3 = poly[(i + 3) % 4]

            # see this diagram: http://www.piginzoo.com/images/20190828/1566987583219.jpg
            # fit_line fits the line's k and b, returned as the coefficients of ax + by + c = 0
            edge = fit_line([p0[0], p1[0]], [p0[1], p1[1]])  # top-left, top-right 0,1
            backward_edge = fit_line([p0[0], p3[0]],
                                     [p0[1], p3[1]])  # top-left, bottom-left 0,3
            forward_edge = fit_line([p1[0], p2[0]], [p1[1], p2[1]])  # top-right, bottom-right 1,2

            # compare p2's distance to line p0p1 against p3's distance to p0p1:
            # whichever of p2/p3 lies farther from p0p1 gets a line parallel to
            # p0p1 drawn through it as an edge of the new quadrilateral
            if point_dist_to_line(p0, p1, p2) > point_dist_to_line(p0, p1, p3):
                # parallel line through p2
                if edge[1] == 0:
                    edge_opposite = [1, 0, -p2[0]]
                else:
                    # edge[0] = k,
                    # p2[1] - edge[0] * p2[0] = y - k*x = b
                    # so edge_opposite is [k, -1, b]: the parallel line's k and b
                    edge_opposite = [edge[0], -1, p2[1] - edge[0] * p2[0]]
            else:
                # parallel line through p3
                if edge[1] == 0:
                    edge_opposite = [1, 0, -p3[0]]
                else:
                    edge_opposite = [edge[0], -1, p3[1] - edge[0] * p3[0]]

            new_p0 = p0
            new_p1 = p1
            new_p2 = p2
            new_p3 = p3
            new_p2 = line_cross_point(
                forward_edge, edge_opposite
            )  # note: edge_opposite is the parallel line; forward_edge is not necessarily perpendicular to it

            # next, see which of p0/p3 is farther from line p1p2, draw the parallel
            # line forward_opposite through it, then intersect:
            # forward_opposite x edge_opposite => new p3, and
            # forward_opposite x edge          => new p0
            # (note this traces out a parallelogram, not a rectangle)
            if point_dist_to_line(p1, new_p2, p0) > point_dist_to_line(
                    p1, new_p2, p3):
                # across p0
                if forward_edge[1] == 0:
                    forward_opposite = [1, 0, -p0[0]]
                else:
                    forward_opposite = [
                        forward_edge[0], -1, p0[1] - forward_edge[0] * p0[0]
                    ]
            else:
                # across p3
                if forward_edge[1] == 0:
                    forward_opposite = [1, 0, -p3[0]]
                else:
                    forward_opposite = [
                        forward_edge[0], -1, p3[1] - forward_edge[0] * p3[0]
                    ]
            new_p0 = line_cross_point(
                forward_opposite,
                edge)  # intersect forward_opposite with edge          => new p0
            new_p3 = line_cross_point(
                forward_opposite, edge_opposite
            )  # intersect forward_opposite with edge_opposite => new p3

            # so it really is a parallelogram, hence the name "fitted_parallelograms"
            fitted_parallelograms.append(
                [new_p0, new_p1, new_p2, new_p3, new_p0])

            # one parallelogram was built above; using the other (backward) edge
            # we can build a second one
            # or move backward edge
            new_p0 = p0
            new_p1 = p1
            new_p2 = p2
            new_p3 = p3
            new_p3 = line_cross_point(backward_edge, edge_opposite)
            if point_dist_to_line(p0, p3, p1) > point_dist_to_line(p0, p3, p2):
                # across p1
                if backward_edge[1] == 0:
                    backward_opposite = [1, 0, -p1[0]]
                else:
                    backward_opposite = [
                        backward_edge[0], -1, p1[1] - backward_edge[0] * p1[0]
                    ]
            else:
                # across p2
                if backward_edge[1] == 0:
                    backward_opposite = [1, 0, -p2[0]]
                else:
                    backward_opposite = [
                        backward_edge[0], -1, p2[1] - backward_edge[0] * p2[0]
                    ]
            new_p1 = line_cross_point(backward_opposite, edge)
            new_p2 = line_cross_point(backward_opposite, edge_opposite)
            fitted_parallelograms.append(
                [new_p0, new_p1, new_p2, new_p3, new_p0])

            # at this point we have fitted two candidate parallelograms per edge
            # of the irregular quadrilateral

        # keep the smallest-area parallelogram (note np.argmin below)
        areas = [Polygon(t).area for t in fitted_parallelograms]
        parallelogram = np.array(fitted_parallelograms[np.argmin(areas)][:-1],
                                 dtype=np.float32)
        # sort the polygon
        parallelogram_coord_sum = np.sum(
            parallelogram, axis=1)  # axis=1 sums x and y per vertex: [[1,1],[2,2]] => [2,4]
        min_coord_idx = np.argmin(
            parallelogram_coord_sum)  # find the top-left corner: usually the vertex with
        # the smallest x+y sum (not absolute, but holds for non-degenerate boxes)
        # take that vertex as p0 and renumber the remaining vertices 0-3 in order
        parallelogram = parallelogram[[
            min_coord_idx, (min_coord_idx + 1) % 4, (min_coord_idx + 2) % 4,
            (min_coord_idx + 3) % 4
        ]]

        # fit the rectangle that wraps around the parallelogram
        rectange = rectangle_from_parallelogram(parallelogram)

        # reorder p0~p3 and compute the corresponding rotation angle here
        rectange, rotate_angle = sort_rectangle(rectange)

        p0_rect, p1_rect, p2_rect, p3_rect = rectange

        # xy_in_poly holds the (y, x) coords of the pixels inside the poly; for each
        # pixel compute its distance to the rectangle's four edges.
        # point_dist_to_line: the first two args are two points on the line, the last is the point
        for y, x in xy_in_poly:
            point = np.array([x, y], dtype=np.float32)
            # top
            geo_map[y, x, 0] = point_dist_to_line(p0_rect, p1_rect, point)
            # right
            geo_map[y, x, 1] = point_dist_to_line(p1_rect, p2_rect, point)
            # down
            geo_map[y, x, 2] = point_dist_to_line(p2_rect, p3_rect, point)
            # left
            geo_map[y, x, 3] = point_dist_to_line(p3_rect, p0_rect, point)
            # angle
            geo_map[y, x, 4] = rotate_angle
    return score_map, geo_map, training_mask
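# point_dist_to_line is used above but defined elsewhere; a minimal sketch
# consistent with how it is called here (p1, p2 are two points on the line,
# p3 is the query point):
import numpy as np


def point_dist_to_line(p1, p2, p3):
    # distance from p3 to the line through p1 and p2, via the cross product
    return np.linalg.norm(np.cross(p2 - p1, p1 - p3)) / np.linalg.norm(p2 - p1)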
Example #49
0
def process_img(img):
    img = cv2.undistort(img, mtx, dist, None, mtx)
    preprocess_image = np.zeros_like(img[:, :, 0])
    gradx = abs_sobel_thresh(img, orient='x', thresh=(12, 255))
    grady = abs_sobel_thresh(img, orient='y', thresh=(25, 255))

    c_binary = color_thresh(img, sthresh=(100, 255), vthresh=(50, 255))
    preprocess_image[((gradx == 1) & (grady == 1) | (c_binary == 1))] = 255

    img_size = (img.shape[1], img.shape[0])
    bot_width = .76
    mid_width = .08
    height_pct = .62
    bottom_trim = .935
    src = np.float32([
        [img.shape[1] * (.5 - mid_width / 2), img.shape[0] * height_pct],
        [img.shape[1] * (.5 + mid_width / 2), img.shape[0] * height_pct],
        [img.shape[1] * (.5 + bot_width / 2), img.shape[0] * bottom_trim],
        [img.shape[1] * (.5 - bot_width / 2), img.shape[0] * bottom_trim],
    ])
    offset = img.shape[1] * .25
    dst = np.float32([[offset, 0], [img.shape[1] - offset, 0],
                      [img.shape[1] - offset, img.shape[0]],
                      [offset, img.shape[0]]])
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    warped = cv2.warpPerspective(preprocess_image,
                                 M,
                                 img_size,
                                 flags=cv2.INTER_LINEAR)

    window_width = 25
    window_height = 80
    curve_centers = Tracker(Mywindow_width=window_width,
                            Mywindow_height=window_height,
                            Mymargin=25,
                            My_ym=10 / 720,
                            My_xm=4 / 384,
                            Mysmooth_factor=15)

    window_centroids = curve_centers.find_window_centroids(warped)

    l_points = np.zeros_like(warped)
    r_points = np.zeros_like(warped)

    leftx = []
    rightx = []

    for level in range(len(window_centroids)):
        leftx.append(window_centroids[level][0])
        rightx.append(window_centroids[level][1])
        l_mask = window_mask(window_width, window_height, warped,
                             window_centroids[level][0], level)
        r_mask = window_mask(window_width, window_height, warped,
                             window_centroids[level][1], level)

        l_points[(l_points == 255) | (l_mask == 1)] = 255
        r_points[(r_points == 255) | (r_mask == 1)] = 255

    # Draw
    # template = np.array(r_points+l_points, np.uint8)
    # zero_channel=np.zeros_like(template)
    # template = np.array(cv2.merge((zero_channel, template, zero_channel)), np.uint8)
    # warpage = np.array(cv2.merge((warped, warped, warped)), np.uint8)
    # result = cv2.addWeighted(warpage, 1, template, 0.5, 0.0)
    # result = warped

    yvals = np.arange(0, warped.shape[0])
    res_yvals = np.arange(warped.shape[0] - (window_height / 2), 0,
                          -window_height)

    left_fit = np.polyfit(res_yvals, leftx, 2)
    left_fitx = left_fit[0] * yvals * yvals + left_fit[1] * yvals + left_fit[2]
    left_fitx = np.array(left_fitx, np.int32)

    right_fit = np.polyfit(res_yvals, rightx, 2)
    right_fitx = right_fit[0] * yvals * yvals + right_fit[
        1] * yvals + right_fit[2]
    right_fitx = np.array(right_fitx, np.int32)

    left_lane = np.array(
        list(
            zip(
                np.concatenate((left_fitx - window_width / 2,
                                left_fitx[::-1] + window_width / 2),
                               axis=0),
                np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)
    right_lane = np.array(
        list(
            zip(
                np.concatenate((right_fitx - window_width / 2,
                                right_fitx[::-1] + window_width / 2),
                               axis=0),
                np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)
    inner_lane = np.array(
        list(
            zip(
                np.concatenate((left_fitx + window_width / 2,
                                right_fitx[::-1] - window_width / 2),
                               axis=0),
                np.concatenate((yvals, yvals[::-1]), axis=0))), np.int32)

    road = np.zeros_like(img)
    road_bkg = np.zeros_like(img)
    cv2.fillPoly(road, [left_lane], color=[255, 0, 0])
    cv2.fillPoly(road, [right_lane], color=[0, 0, 255])
    cv2.fillPoly(road, [inner_lane], color=[0, 255, 0])
    cv2.fillPoly(road_bkg, [left_lane], color=[255, 255, 255])
    cv2.fillPoly(road_bkg, [right_lane], color=[255, 255, 255])

    road_warped = cv2.warpPerspective(road,
                                      Minv,
                                      img_size,
                                      flags=cv2.INTER_LINEAR)
    road_warped_bkg = cv2.warpPerspective(road_bkg,
                                          Minv,
                                          img_size,
                                          flags=cv2.INTER_LINEAR)

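    # Subtracting the white lane background (weight -1.0) blanks out the lane
    # pixels first, so the colored overlay added next at weight 0.7 stays vivid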
    base = cv2.addWeighted(img, 1.0, road_warped_bkg, -1.0, 0.0)
    result = cv2.addWeighted(base, 1.0, road_warped, .7, 0.0)

    ym_per_pix = curve_centers.ym_per_pix
    xm_per_pix = curve_centers.xm_per_pix

    curve_fit_cr = np.polyfit(
        np.array(res_yvals, np.float32) * ym_per_pix,
        np.array(leftx, np.float32) * xm_per_pix, 2)
    curverad = (
        (1 +
         (2 * curve_fit_cr[0] * yvals[-1] * ym_per_pix + curve_fit_cr[1])**2)**
        1.5) / np.absolute(2 * curve_fit_cr[0])
    camera_center = (left_fitx[-1] + right_fitx[-1]) / 2
    center_diff = (camera_center - warped.shape[1] / 2) * xm_per_pix
    side_pos = 'left'
    if center_diff <= 0:
        side_pos = 'right'

    cv2.putText(result,
                'Radius of Curvature = ' + str(round(curverad, 3)) + '(m)',
                (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
    cv2.putText(
        result, 'Vehicle is ' + str(abs(round(center_diff, 3))) + 'm ' +
        side_pos + ' of center', (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1,
        (255, 255, 255), 2)

    return result
def process_image(img):
    global frame_num
    
    if img.shape[2] == 4:
        img = img[:,:,0:3]

    dst = np.copy(img)

# #   Save each frame of video
#     fn = './project_video/project_video' + str(frame_num) + '.jpg'
#     mpimg.imsave(fn, img)
    
    # distortion correction
    dst = cv2.undistort(dst, mtx, dist, None, mtx)

    # apply color and gradient threshold pipeline to image
    dst = pipeline(dst)

    # extract only the region of interest in front of the vehicle
    dst = region_of_interest(dst)
        
    # warp the image to a topdown view
    dst = warp(dst, M)
  
    ploty, leftLine, rightLine = fit_lines(dst)
    
    # Draw lane image and warp it back to original image space using inverse perspective matrix (Minv)
    lane = draw_lane(dst, ploty, leftLine.fitx, rightLine.fitx)
    lane_undist = warp(lane, Minv)

    # Combine lane image with the original image
    dst = cv2.addWeighted(img, 1, lane_undist, 0.3, 0)

#     # Draw center line, warp it back to original image space using inverse perspective matrix (Minv), and combine
#     ctr_line = draw_ctr_line(dst, ploty, left_fitx, right_fitx)
#     ctr_line_undist = warp(ctr_line, Minv)
#     dst = cv2.addWeighted(dst, 1, ctr_line_undist, 1.0, 0)

#    dst = np.dstack((dst, dst, dst))*255

    # Write radii as text overlayed on image
    font = cv2.FONT_HERSHEY_SIMPLEX
    rad = (leftLine.radius_of_curvature + rightLine.radius_of_curvature)/2
    msg = 'radius: ' + str(int(rad)) + 'm'
    cv2.putText(dst,msg,(550,540), font, 0.85,(0,0,255),2)
    
    # Write deviation from center as text overlayed on image
    xm_per_pix = 3.7/700
    deviation = 640. - (leftLine.fitx[710] + (rightLine.fitx[710] - leftLine.fitx[710])/2)
    deviation = deviation * xm_per_pix
    msg = 'deviation: ' + "{:2.2f}".format(deviation) + 'm'
    cv2.putText(dst,msg,(520,680), font, 0.85,(0,0,255),2)

    msg = 'frame: ' + str(int(frame_num))
    cv2.putText(dst,msg,(50,50), font, 0.85,(0,0,255),2)
    
    # Draw car's center mark at bottom of screen
    marker = np.array([ [632,719], [640,700], [648,719] ], np.int32)
    cv2.fillPoly(dst, [marker], (0,0,0))

    frame_num += 1
            
    return dst
Example #51
0
        cv2.line(frame, (leftx2, lefty2), (rightx2, righty2), (0, 255, 255),
                 10)

    if averaged_lines is not None:
        leftx1 = averaged_lines[0][0][0]
        lefty1 = averaged_lines[0][0][1]
        leftx2 = averaged_lines[0][0][2]
        lefty2 = averaged_lines[0][0][3]
        rightx1 = averaged_lines[1][0][0]
        righty1 = averaged_lines[1][0][1]
        rightx2 = averaged_lines[1][0][2]
        righty2 = averaged_lines[1][0][3]

    rectangle = np.array([[(leftx1, lefty1), (leftx2, lefty2),
                           (rightx2, righty2), (rightx1, righty1)]], np.int32)
    cv2.fillPoly(line_image, rectangle, (0, 100, 0))

    lineequations = lineequation.getlines(averaged_lines)

    if lineequations is not None and lineequations[
            0] is not None and lineequations[1] is not None:

        leftgradient = lineequations[0][0]
        leftyintercept = lineequations[0][1]

        rightgradient = lineequations[1][0]
        rightyintercept = lineequations[1][1]

    initial_w = cap.get(3)  # cv2.CAP_PROP_FRAME_WIDTH
    initial_h = cap.get(4)  # cv2.CAP_PROP_FRAME_HEIGHT
Example #52
0
def polygon_to_mask(poly, h, w):
    mask = np.zeros((h, w), dtype=np.uint8)
    cv2.fillPoly(mask, [np.round(poly).astype(int)], 1)
    return mask
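# A quick usage sketch for polygon_to_mask above (illustrative values):
# tri = [[10, 10], [80, 20], [40, 70]]
# mask = polygon_to_mask(tri, 100, 100)  # 100x100 uint8 mask, 1 inside, 0 outside
# area_px = int(mask.sum())              # pixel count inside the triangle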
Example #53
0
def lanes(inputImg, winWidth):
    segments = 20
    h = inputImg.shape[0]
    step = int(np.ceil(inputImg.shape[1] / segments))
    laneLineLx = []
    laneLineLy = []
    laneLineRx = []
    laneLineRy = []

    xNzR = []
    yNzR = []

    xNzL = []
    yNzL = []

    for row in reversed(range(0, h, step)):
        ImgSeg = inputImg[row:row + step, :]
        #        print(row,row+step)
        histogram = np.sum(ImgSeg[:, :], axis=0)
        peaks, _ = find_peaks(histogram, distance=10)
        peaksR = peaks[peaks >= histogram.shape[0] / 2]
        peaksL = peaks[peaks < histogram.shape[0] / 2]
        #        plt.plot(histogram)
        peaksLtemp = histogram.shape[0] / 2
        if peaksL.size > 0:
            #            plt.plot(peaksL[-1],histogram[peaksL[-1]], 'r+')
            laneLineLx.append(peaksL[-1])
            laneLineLy.append(row + step / 2)
            nzXY = ImgSeg[:, peaksL[-1] - winWidth:peaksL[-1] +
                          winWidth].nonzero()
            xNzL.append(nzXY[1] + peaksL[-1] - winWidth)
            yNzL.append(nzXY[0] + row)
            peaksLtemp = peaksL[-1]
        if peaksR.size > 0:
            #                plt.plot(peaksR[0],histogram[peaksR[0]], 'bo')
            if 450 + peaksLtemp > peaksR[0]:
                #                print("dist = " , peaksLtemp - peaksR[0])
                laneLineRx.append(peaksR[0])
                laneLineRy.append(row + step / 2)
                nzXY = ImgSeg[:, peaksR[0] - winWidth:peaksR[0] +
                              winWidth].nonzero()
                xNzR.append(nzXY[1] + peaksR[0] - winWidth)
                yNzR.append(nzXY[0] + row)


#        plt.show()

    if len(xNzL) <= 0 or len(xNzR) <= 0:
        return inputImg, False, 0, "Straight"

    inputImg = cv2.cvtColor(inputImg, cv2.COLOR_GRAY2BGR)
    for i, j in zip(np.hstack(xNzL), np.hstack(yNzL)):
        cv2.circle(inputImg, (i, j), 2, (0, 0, 255), -1)
    for i, j in zip(np.hstack(xNzR), np.hstack(yNzR)):
        cv2.circle(inputImg, (i, j), 2, (0, 255, 0), -1)

    with warnings.catch_warnings():
        warnings.filterwarnings('error')
        try:
            left = np.polyfit(np.hstack(yNzL), np.hstack(xNzL), 2)
            right = np.polyfit(np.hstack(yNzR), np.hstack(xNzR), 2)
            y = np.linspace(0, h, h).astype(int)
            xL = [
                int(left[2] + left[1] * (yy) + left[0] * (yy**2)) for yy in y
            ]
            xR = [
                int(right[2] + right[1] * (yy) + right[0] * (yy**2))
                for yy in y
            ]
        except np.RankWarning:
            return inputImg, False, 0, "Straight"

    pts = np.column_stack((xL, y))
    pts = np.vstack((pts, np.flip(np.column_stack((xR, y)), 0)))
    laneShade = np.zeros_like(inputImg)
    for i, j, k in zip(xL, xR, y):
        cv2.circle(inputImg, (i, k), 2, (255, 0, 0), -1)
        cv2.circle(inputImg, (j, k), 2, (255, 0, 0), -1)

    pts2 = []
    pts2.append(pts)

    cv2.fillPoly(laneShade, np.array(pts2), (100, 100, 255))
    for i, j, k in zip(xL, xR, y):
        cv2.circle(laneShade, (i, k), 5, (255, 0, 0), -1)
        cv2.circle(laneShade, (j, k), 5, (255, 0, 0), -1)
    inputImg = cv2.resize(inputImg, (0, 0), fx=0.5, fy=0.4)
    #    cv2.imshow("cor", laneShade)

    #    cv2.imshow("cor1", inputImg)

    curvature = rad_of_curvature(np.column_stack((xL, y)),
                                 np.column_stack((xR, y)))

    d1 = direction(xL[0], y[0], xL[int(len(xL) / 2)], y[int(len(xL) / 2)],
                   xL[-1], y[-1])
    d2 = direction(xR[0], y[0], xR[int(len(xR) / 2)], y[int(len(xR) / 2)],
                   xR[-1], y[-1])
    dT = (d1 + d2) / 2
    print(dT)
    if abs(dT) < 7000:
        dirTurn = "Straight"
    elif dT > 7000:
        dirTurn = "Right Turn"
    else:
        dirTurn = "Left Turn"

    return laneShade, True, curvature, dirTurn
Example #54
0
def convex_hull(frame, yt, ht, colour, centroid):
    x, y = frame.shape[:2]
    # find contours and their convex hulls (OpenCV 3.x three-value signature)
    _, contours, heirarchy = cv2.findContours(frame, cv2.RETR_TREE,
                                              cv2.CHAIN_APPROX_SIMPLE)
    hull = [cv2.convexHull(c) for c in contours]
    #cv2.drawContours(r_frame,hull,-1,(255,0,255))
    cv2.drawContours(r_frame[base_cut + yt:base_cut + yt + y, 0:1280], hull,
                     -1, (255, 0, 0))
    area = 0
    for c in contours:
        thisArea = cv2.contourArea(c)
        if thisArea > area:
            area = thisArea
#    for c in contours:
#calibrate for surfers
# x, y, w, h = cv2.boundingRect(c)
#if h<10 and w<40:
#   print('swell detected')

#print('present area size',area)
    for c in contours:
        if cv2.contourArea(c) == area:
            cv2.fillPoly(r_frame[base_cut + yt:base_cut + yt + y, 0:1280],
                         pts=[c],
                         color=(colour))
            M = cv2.moments(c)

            # calculate x,y coordinate of center
            try:
                cX = int(M["m10"] / M["m00"])
                cY = yt + int(ht / 2)
            except ZeroDivisionError:  # degenerate contour with zero area
                cX = int(M["m10"] / 1)
                cY = int(M["m01"] / 1)

            # put text and highlight the center
            if centroid:
                cv2.circle(r_frame, (cX, base_cut + cY), 5, (0, 0, 255), -1)
                cv2.putText(r_frame, "centroid", (cX - 25, base_cut + cY - 25),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                Drift_tracker_X.append(cX)
                Drift_tracker_Y.append(base_cut + cY)
    # store in an array and find the largest area for peak wave size

# cv2.imshow('new',frame[y_val:roi_base,0:1280])#new image
#cv2.imshow('old',frame[0:y_val,0:y])#old image

    hull = [cv2.convexHull(c) for c in contours]

    x1, y1, w1, h1 = cv2.boundingRect(swell_cons)

    # final =cv2.drawContours(frame,hull,-1,(255,0,255))
    # cv2.drawContours(r_frame,hull,-1,(255,0,255))
    #cv2.imshow('Sub_Hull',r_frame)

    return area
Example #55
0
def preprocess_external_data(annot_path, tissue_path, train_path):
    """
    Preprocesses external data and puts them in training directory.
    Modified the codes provided in
    https://www.kaggle.com/voglinio/external-h-e-data-with-mask-annotations
    """
    if os.path.exists(annot_path) and os.path.exists(
            tissue_path) and os.path.exists(train_path):
        for annot_num, annotation_tif in tqdm(
                enumerate(os.listdir(tissue_path))):

            image_id = annotation_tif.split('.')[0]

            img = cv2.imread(os.path.join(tissue_path, annotation_tif))

            annotation_xml = image_id + '.xml'
            tree = minidom.parse(os.path.join(annot_path, annotation_xml))
            regions = tree.getElementsByTagName(
                "Regions")[0].getElementsByTagName("Region")

            mask = np.zeros((img.shape[0], img.shape[1]))

            for mm, region in enumerate(regions):

                vertices = region.getElementsByTagName("Vertex")
                polygon = []
                for vertex in vertices:
                    x = float(vertex.attributes["X"].value)
                    y = float(vertex.attributes["Y"].value)
                    polygon.append([x, y])
                polygon = np.array(polygon)
                polygon = polygon.reshape((-1, 1, 2))
                polygon = np.round(polygon)
                polygon = polygon.astype(np.int32)
                cv2.fillPoly(mask, [polygon], mm + 1)

            for mm, (img_piece, mask_piece) in enumerate(
                    zip(split_image(img), split_image(mask))):

                path = train_path + image_id + '_' + str(mm)
                if not os.path.isdir(path):
                    os.mkdir(path)
                if not os.path.isdir(path + '/images/'):
                    os.mkdir(path + '/images/')

                if not os.path.isdir(path + '/masks/'):
                    os.mkdir(path + '/masks/')
                fname = path + '/images/' + image_id + '_' + str(mm) + '.png'
                cv2.imwrite(fname, img_piece)

                mask_piece = label(mask_piece)

                mask_piece = np.repeat(mask_piece[:, :, np.newaxis],
                                       mask_piece.max(),
                                       axis=-1)
                mask_piece = np.equal(
                    mask_piece,
                    np.ones_like(mask_piece) *
                    np.arange(1,
                              mask_piece.max() + 1)).astype(np.uint8)

                fname = path + '/masks/' + image_id + '_' + str(mm) + '.h5'
                with h5py.File(fname, "w") as hf:
                    hf.create_dataset("arr", data=mask_piece)

    else:
        print('One of the paths provided does not exist')
Example #56
0
        contours, _ = cv2.findContours(th3, R, P)

        maxi = 0
        for cnt in contours:
            if cv2.contourArea(cnt) > maxi:
                maxi = cv2.contourArea(cnt)

        blanck2 = blanck_picture(img)
        blanck2 = cv2.cvtColor(blanck2, cv2.COLOR_BGR2GRAY)

        for cnts in contours:
            if cv2.contourArea(cnts) < maxi and\
               cv2.contourArea(cnts) > 100:
                #print(cv2.contourArea(cnt))
                cv2.drawContours(blanck2, [cnts], -1, (255, 255, 255), 1)
                cv2.fillPoly(blanck2, pts=[cnts], color=(255, 255, 255))

        #show_picture("blanck2", blanck2, 0, "")

        for x in range(0, blanck2.shape[0]):
            for y in range(0, blanck2.shape[1]):
                if blanck2[x, y] == 0:
                    copy[x, y] = 255, 255, 255
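        # (equivalent vectorized form: copy[blanck2 == 0] = (255, 255, 255))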

        copy = cv2.cvtColor(copy, cv2.COLOR_BGR2GRAY)
        th3 = cv2.adaptiveThreshold(copy, 255, MG, T, 11, 5)
        #show_picture("copycopycopy", th3, 0, "")

        blanck3 = blanck_picture(img)
        contours, _ = cv2.findContours(th3, R, P)
Example #57
0
def main(cam_id, type):
    """
    由于眼部检测的不稳定性,这里采用在已经识别出的人脸范围内假定的眼睛区域作为眼睛区域的判断
    采用 关键点检测 加 伪眼部光流检测
    关键点静止但眼部光流明显时判定为活体
    这个主要判断 眨眼 来做为活体检测的依据
    更换为双摄像头
    并判断
    1、电子设备(普通摄像头中有人脸区域而红外摄像头中没有)
    2、照片(眼睛区域关键点位移的方差与眼睛外部的方差 差值小于阈值时判定为照片,但有很大程度会把真人识别成照片)
    3、通过,活体(。。。)
    :param cam_id: 摄像头ID
    :param type: 类型(color:Visible Light 可见光, gray:infrared 红外)
    :return:
    """

    print(__doc__)
    cap = cv2.VideoCapture(cam_id)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('D:/data.avi', fourcc, 30, (640, 480))
    f_cascade = cv2.CascadeClassifier("C:/opencv/opencv/build/etc/haarcascades/haarcascade_frontalface_alt2.xml")
    e_cascade = cv2.CascadeClassifier("C:\\opencv\\opencv\\build\\etc\\haarcascades\\haarcascade_eye.xml")
    ret, prev = cap.read()
    prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)

    lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    feature_params = dict(maxCorners=500, qualityLevel=0.3, minDistance=7, blockSize=7)

    tracks = []
    frame_index = 0  # overall frame count, used for the electronic-device check
    face_frame_index = 0  # frames containing a face, used for the keypoint tracks
    detect_interval = 3
    track_len = 10
    msg_show_success = 0  # passed the optical-flow and keypoint checks
    msg_show_opt = 0  # frames left to show the optical-flow pass message
    msg_show_key = 0  # frames left to show the keypoint pass message
    msg_show_success_f = 0  # failed the optical-flow and keypoint checks
    msg_show_opt_f = 0  # frames left to show the optical-flow fail message
    msg_show_key_f = 0  # frames left to show the keypoint fail message
    has_face = False
    sustain = 10  # how many frames a message stays on screen

    # optical flow stored for every frame
    eye_flow_lines_t = []

    while True:
        if cv2.waitKey(1) == 27:  # Esc for exit
            break
        t = clock()
        ret, img = cap.read()
        img = cv2.flip(img, 1)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        frame_index += 1
        rectangles = detect(gray, f_cascade)

        mask = np.zeros_like(gray)  # mask restricting where keypoints are detected

        if len(rectangles) == 1:  # only handle a single face
            share[type][frame_index % el_check_count] = 1  # mark this frame: face detected
            if not has_face:  # face just (re)appeared, restart the tracks
                tracks = []
            has_face = True
            for rectangle in rectangles:
                rx0, ry0, rx1, ry1 = rectangle
                # if not (140 < rx1 - rx0 < 160 and 140 < ry1 - ry0 < 160):  # restrict the face-box size
                #     continue
                draw_rectangle(img, rectangle, color=(0, 225, 0))  # face region
                rectangles_eye = detect(gray[ry0:ry1, rx0:rx1], e_cascade)  # detect eye regions
                # draw_rectangles(img[ry0:ry1, rx0:rx1], rectangles_eye, color=(255, 0, 225))

                # eye-region optical flow
                eye_flow_lines = []
                # for erx0, ery0, erx1, ery1 in rectangles_eye:
                #     eye_flow = opt_flow(prev_gray[ry0:ry1, rx0:rx1][ery0:ery1, erx0:erx1],
                #                         gray[ry0:ry1, rx0:rx1][ery0:ery1, erx0:erx1])  # get opt flow
                #     eye_flow_lines.append(draw_flow(img[ry0:ry1, rx0:rx1][ery0:ery1, erx0:erx1],
                #                                     eye_flow, step=4))  # draw the flow points

                # Pseudo eye region: assume the eyes span 1/4 to 1/2 of the face
                # vertically and 1/6 to 5/6 horizontally, to compensate for the
                # eye detector failing to fire on every frame.

                face_h = ry1 - ry0
                face_w = rx1 - rx0
                face_hs = face_h // 4  # integer division: these are used as slice indices
                face_he = face_h // 2
                face_ws = face_w // 6
                face_we = face_w // 6 * 5
                eye_flow = opt_flow(prev_gray[ry0:ry1, rx0:rx1][face_hs:face_he, face_ws:face_we],
                                    gray[ry0:ry1, rx0:rx1][face_hs:face_he, face_ws:face_we])
                eye_flow_lines.append(
                    draw_flow(img[ry0:ry1, rx0:rx1][face_hs:face_he, face_ws:face_we], eye_flow, step=4))

                eye_sorted = []  # sorted displacement magnitudes (eye region)
                eye_sorted2 = []
                for lines in eye_flow_lines:
                    mds = []
                    for (x1, y1), (x2, y2) in lines:
                        md = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
                        mds.append(md)
                        eye_sorted2.append(md)
                    eye_sorted.append(sorted(mds, reverse=True))
                    eye_flow_lines_t.append(eye_sorted2)  # store this frame's flow displacements
                # Draw the keypoint tracks.
                # Keypoints with large displacement get dropped here,
                # which does not affect the other checks.
                if len(tracks) > 0:
                    img0, img1 = prev_gray, gray
                    p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1, 1, 2)
                    p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
                    p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
                    d = abs(p0 - p0r).reshape(-1, 2).max(-1)
                    good = d < 0.5
                    new_tracks = []
                    for tr, (x, y), good_flag in zip(tracks, p1.reshape(-1, 2), good):
                        if not good_flag:
                            continue
                        if not (rx0 < x < rx1 and ry0 < y < ry1):
                            continue
                        tr.append((x, y))
                        if len(tr) > track_len:
                            del tr[0]
                        new_tracks.append(tr)
                        cv2.circle(img, (int(x), int(y)), 2, (0, 255, 0), -1)  # newer OpenCV requires integer coordinates
                    tracks = new_tracks
                    cv2.polylines(img, [np.int32(tr) for tr in tracks], False, (0, 255, 0))
                    # draw_str(img, (20, 20), 'track count: %d' % len(tracks))

                # restrict the region of interest to the face rectangle
                cv2.fillPoly(mask, np.array([[[rx0, ry0], [rx1, ry0], [rx1, ry1], [rx0, ry1]]]), (255, 255, 255))
                for x, y in [np.int32(tr[-1]) for tr in tracks]:
                    cv2.circle(mask, (x, y), 5, 0, -1)  # exclude last round's keypoints

                if face_frame_index % detect_interval == 0:
                    # print('**************** start ***************')
                    l_sorted = []
                    l_sorted_eye = []  # keypoints inside the eye region
                    l_sorted_out = []  # keypoints outside the eye region

                    l_tmp = []
                    l_tmp_eye = []
                    l_tmp_out = []
                    for tr in tracks:
                        (x0, y0) = tr[0]
                        (x1, y1) = tr[-1]

                        if rx0 + face_ws < x1 < rx0 + face_we and ry0 + face_hs < y1 < ry0 + face_he:  # inside the assumed eye region
                            l_tmp_eye.append(round(math.sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2), 2))
                        else:
                            l_tmp_out.append(round(math.sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2), 2))

                        l = round(math.sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2), 2)
                        l_tmp.append(l)
                        # if l > 0:
                        # print(round(math.atan(abs((y1 - y0) / (x1 - x0))) / math.pi * 180, 2), end=':')
                        # print(l, end='\t')
                    # print('\n+++++++++++++++')

                    l_sorted = sorted(l_tmp, reverse=True)
                    l_sorted_eye = sorted(l_tmp_eye, reverse=True)
                    l_sorted_out = sorted(l_tmp_out, reverse=True)
                    if len(l_sorted_eye) > 3 and len(l_sorted_out) > 3:
                        print(np.var(l_sorted_eye[:3]))
                        print(np.var(l_sorted_out[:3]))
                        print(abs(np.var(l_sorted_eye[:3]) - np.var(l_sorted_out[:3])))
                        print('=======')
                    if len(l_sorted_out) > 3 and len(l_sorted_eye) > 3 \
                            and l_sorted_out[0] < 1 and l_sorted_eye[0] > 1 \
                            and l_sorted_eye[0] - l_sorted_out[0] > 1:
                        # print('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
                        msg_show_key = sustain
                        msg_show_success = sustain
                    elif len(l_sorted_out) > 3 and len(l_sorted_eye) > 3 \
                            and abs(np.var(l_sorted_eye[:3]) - np.var(l_sorted_out[:3])) < 0.0005:
                        msg_show_key_f = sustain
                        msg_show_success_f = sustain
                    # ======== debug: print the first values per eye ========
                    # if True:
                    #     for i, md2 in enumerate(eye_sorted):
                    #         count = 0
                    #         print('eye', str(i + 1), end=':\t')
                    #         for md in md2:
                    #             count += 1
                    #             if count > 150:
                    #                 break
                    #             print(round(md, 2), end=',')
                    #         print()
                    #     print('###################')

                    # liveness check via optical-flow ratios
                    np_eye = np.array(sorted(eye_sorted2, reverse=True)[:30])
                    np_eye = np_eye[np_eye > 0]
                    np_l = np.array(l_sorted[:10])

                    # print(np_eye.size, '+++++', np_l.size)
                    if np_eye.size != 0 and np_l.size != 0:
                        # fraction of eye-region flow vectors longer than 2 px
                        flow_pre = np_eye[np_eye > 2].size * 1.0 / np_eye.size
                        # fraction of the largest keypoint displacements above 2 px
                        ln_pre = np_l[np_l > 2].size * 1.0 / np_l.size
                        # print(flow_pre, '---', ln_pre)
                        # noticeable eye motion while the keypoints stay still -> live
                        if 0.8 > flow_pre > 0.05 and ln_pre < 0.2:
                            msg_show_opt = sustain
                            msg_show_success = sustain
                            # print('yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy')
                        # elif flow_pre < 0.05 and ln_pre < 0.2:
                        #     msg_show_opt_f = sustain
                        #     msg_show_success_f = sustain
                    # print('**************** end ***************')
                    # detect fresh keypoints to track
                    p = cv2.goodFeaturesToTrack(gray, mask=mask, **feature_params)
                    if p is not None:
                        for x, y in np.float32(p).reshape(-1, 2):
                            tracks.append([(x, y)])

            face_frame_index += 1
        else:
            has_face = False
            share[type][frame_index % el_check_count] = 0  # no face (or more than one) detected in this frame

        prev_gray = gray
        dt = clock() - t
        # draw_str(img, (20, 20), 'time: %.1f ms' % (dt * 1000))
        # if msg_show_key > 0:
        #     draw_str(img, (450, 20), 'YES by KEY', front=(0, 0, 255))
        #     msg_show_key -= 1
        # if msg_show_opt > 0:
        #     draw_str(img, (300, 20), 'YES by OPT', front=(0, 0, 255))
        #     msg_show_opt -= 1
        if sum(share['color']) > el_check_count * 0.8 and sum(share['gray']) == 0:  # face in >80% of color frames but in none of the infrared frames
            draw_str(img, (400, 30), 'Electronic', front=(0, 0, 255), size=2)
            msg_show_success = 0
            msg_show_success_f = 0
        elif msg_show_success > 0:
            draw_str(img, (400, 30), 'Pass', front=(0, 255, 0), size=2)
            msg_show_success_f = 0
        elif msg_show_success_f > 0:
            draw_str(img, (400, 30), 'Photo', front=(0, 0, 255), size=2)
            msg_show_success = 0

        msg_show_success -= 1
        msg_show_success_f -= 1

        cv2.imshow(type + str(cam_id), img)
        out.write(img)
        # cv2.imshow('mask', mask)
    cap.release()
    out.release()
    cv2.destroyAllWindows()
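The track maintenance in the example follows the forward-backward Lucas-Kanade consistency check familiar from OpenCV's lk_track.py sample: track each point forward, track the result back, and keep only points that return close to where they started. A stripped-down sketch; update_tracks and the 0.5-pixel threshold are illustrative:

import cv2
import numpy as np

lk_params = dict(winSize=(15, 15), maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

def update_tracks(tracks, prev_gray, gray, max_fb_error=0.5, track_len=10):
    """Advance each track by one frame, keeping only points whose
    backward-tracked position lands close to where they started."""
    if not tracks:
        return tracks
    p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1, 1, 2)
    p1, _, _ = cv2.calcOpticalFlowPyrLK(prev_gray, gray, p0, None, **lk_params)
    p0r, _, _ = cv2.calcOpticalFlowPyrLK(gray, prev_gray, p1, None, **lk_params)
    fb_error = abs(p0 - p0r).reshape(-1, 2).max(-1)
    new_tracks = []
    for tr, (x, y), ok in zip(tracks, p1.reshape(-1, 2), fb_error < max_fb_error):
        if not ok:
            continue
        tr.append((x, y))
        if len(tr) > track_len:  # cap the track history
            del tr[0]
        new_tracks.append(tr)
    return new_tracks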
Example #58
0
def roi(img,
        vertices):  # creates a region of interest from the given vertices
    mask = np.zeros_like(img)
    cv2.fillPoly(mask, vertices, 255)
    masked = cv2.bitwise_and(img, mask)
    return masked
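A possible usage of roi, assuming an 8-bit single-channel input (the hard-coded mask value 255 matches grayscale); the file name and the triangle are illustrative:

import cv2
import numpy as np

img = cv2.imread('road.jpg', cv2.IMREAD_GRAYSCALE)  # hypothetical input file
h, w = img.shape
# triangle spanning the bottom edge up to the image centre
vertices = np.array([[(0, h), (w, h), (w // 2, h // 2)]], dtype=np.int32)
cropped = roi(img, vertices)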
Example #59
0
 cv2.setMouseCallback(files[i], mark_number_plate)
 while (True):
     # show image so that user can mark number plate
     cv2.imshow(str(files[i]), image)
     op_status = cv2.waitKey(20) & 0xFF
     # if user press 'm' key
     if (op_status == ord('m')):
         # create copy of image
         mask_image = image.copy()
         # put zeros in all cells
         mask_image[:, :, 0] = np.zeros([image.shape[0], image.shape[1]])
         mask_image[:, :, 1] = np.zeros([image.shape[0], image.shape[1]])
         mask_image[:, :, 2] = np.zeros([image.shape[0], image.shape[1]])
         # put 255 in place of number plate
         corners = np.array([[[x1, y1], [x2, y2], [x3, y3], [x4, y4]]])
         cv2.fillPoly(mask_image, corners, (255, 255, 255))
         # resetting click_count
         click_count = 0
         # do a multiplication operation of image and mask to extract number plate
         number_plate = image_copy * (mask_image / 255)
         # show number plate
         cv2.imshow("Object of Interest", number_plate)
         cv2.waitKey(0)
         cv2.destroyAllWindows()
         # asking user response on above marking
         user_response = input(
             "Press 'd' key for DONE or 'r' key to repeat: ")
         # if number is properly marked
         if (user_response == 'd'):
             # save mask
             mask_image_name = files[i].split(".")[0] + "_mask.jpg"
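The masking step in isolation: black image, white quadrilateral, element-wise multiply to keep only the plate pixels. File names and corner coordinates are illustrative:

import cv2
import numpy as np

image = cv2.imread('car.jpg')  # hypothetical input file
# four marked corners of the plate, illustrative values
corners = np.array([[[100, 200], [300, 190], [310, 260], [105, 270]]])
mask_image = np.zeros_like(image)
cv2.fillPoly(mask_image, corners, (255, 255, 255))
# multiplying by the 0/1 mask zeroes everything outside the quadrilateral
number_plate = (image * (mask_image / 255)).astype(np.uint8)
cv2.imwrite('plate.jpg', number_plate)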
Example #60
0
def detectLines(image, origImg, defaultLine1, defaultLine2, secondPoints):
    allp1 = []
    allp2 = []
    retPoints1x = []
    retPoints1y = []
    retPoints2x = []
    retPoints2y = []
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = cv2.Canny(image, 20, 200)
    (height, width) = image.shape
    #cv2.imshow("canny", image)
    #cv2.waitKey()
    imageROI1 = image[:, 0:width//2]
    imageROI2 = image[:, width//2:width]
    #cv2.imshow("ROI", imageROI1)
    #cv2.imshow("ROI", imageROI2)
    #cv2.waitKey()
    lines1 = cv2.HoughLinesP(imageROI1, 4, np.pi/180, 80, lines = 1, minLineLength=10, maxLineGap=600)
    lines2 = cv2.HoughLinesP(imageROI2, 4, np.pi/180, 80, lines = 1, minLineLength=10, maxLineGap=600)
    #image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    lineImg = copy.deepcopy(origImg)
    if lines1 is not None:
        for lineSet in lines1:
            for line in lineSet:
                if line[0] == line[2]:  # skip vertical segments to avoid division by zero
                    continue
                lineSlope = (line[1] - line[3]) / (line[0] - line[2])
                if (lineSlope > -100 and lineSlope < -0.5) or (lineSlope < 100 and lineSlope > 0.3):
                    cv2.line(lineImg, (line[0], line[1]), (line[2], line[3]), (0,0, 255), thickness=10)
    if lines2 is not None:
        for lineSet in lines2:
            for line in lineSet:
                if line[0] == line[2]:
                    continue
                lineSlope = (line[1] - line[3]) / (line[0] - line[2])
                if (lineSlope > -100 and lineSlope < -0.5) or (lineSlope < 100 and lineSlope > 0.3):
                    cv2.line(lineImg, (line[0] + width//2, line[1]), (line[2] + width//2, line[3]), (0,0, 255), thickness=10)



    #line averaging

    currInd = 0
    points1x = []
    points1y = []
    points2x = []
    points2y = []
    polyPoints = []
    bottomPointFound = 10
    lastGoodLine1 = defaultLine1
    print("Last Good:" + str(lastGoodLine1))
    lastGoodLine2 = defaultLine2
    print("Last Good2:" + str(lastGoodLine2))

    if lines1 is not None:
        for lineSet in lines1:
            for line in lineSet:
                if line[0] == line[2]:  # skip vertical segments to avoid division by zero
                    continue
                lineSlope = (-line[1] + line[3]) / (line[0] - line[2])
                if (lineSlope > -100 and lineSlope < -0.3) or (lineSlope < 100 and lineSlope > 0.3):
                    yInt = (-line[1]) - (lineSlope * line[0])
                    print("Line Slope:" + str(lineSlope))
                    divisorForYBetweenDots = 100
                    for mult in range(divisorForYBetweenDots):
                        currY = int(height*(mult/divisorForYBetweenDots))
                        p1 = [((-currY-yInt)/lineSlope), currY]
                        if line[0] > line[2]:
                            minX = line[2]
                            maxX = line[0]
                        else:
                            minX = line[0]
                            maxX = line[2]
                        if p1[0] > minX and p1[0] < maxX:
                            allp1.append(p1)
                            #allp2.append(p2)
                            points1x.append(p1[0])
                            #points1x.append(p2[0])
                            points1y.append(p1[1])
                            #points1y.append(p2[1])
        retPoints1x = points1x
        retPoints1y = points1y
        #counting in secondary points
        if secondPoints is not None:
            points1x = points1x + points1x + points1x
            points1y = points1y + points1y + points1y
            for pointSet in secondPoints:
                secondPointsCurr = pointSet[0]
                points1x = points1x + secondPointsCurr[0]
                points1y = points1y + secondPointsCurr[1]

        if len(points1x) > 0:
            a, b, c = np.polyfit(points1y, points1x, 2)
            lastGoodLine1 = [a, b, c]
            allowedLength = height // 3
            for y in range(height):
                y = height - y
                x = int(a * (y * y) + b * y + c)
                cv2.circle(origImg, (x, y), 5, (0,0,255))
                bottomPointFound = bottomPointFound - 1
                #if x < height:
                allowedLength = allowedLength - 1
                if bottomPointFound == 0:
                    polyPoints.append([x, y])
                    bottomPointFound = 10
                if allowedLength == 0:
                    polyPoints.append([x, y])
                    break
    if lastGoodLine1 is not None and len(points1x) <= 0:
        print("Drawing old line")
        a = lastGoodLine1[0]
        b = lastGoodLine1[1]
        c = lastGoodLine1[2]
        allowedLength = height // 3
        for y in range(height):
            y = height - y
            x = int(a * (y * y) + b * y + c)
            cv2.circle(origImg, (x, y), 5, (0, 0, 255))
            bottomPointFound = bottomPointFound - 1
            allowedLength = allowedLength - 1
            if bottomPointFound == 0:
                polyPoints.append([x, y])
                bottomPointFound = 10
            if allowedLength == 0:
                polyPoints.append([x, y])
                break
    bottomPointFound = 10
    polyPoints = list(reversed(polyPoints))
    if lines2 is not None:
        for lineSet in lines2:
            for line in lineSet:
                if line[0] == line[2]:  # skip vertical segments to avoid division by zero
                    continue
                lineSlope = (-line[1] + line[3]) / (line[0] - line[2])
                if (lineSlope > -100 and lineSlope < -0.3) or (lineSlope < 100 and lineSlope > 0.3):
                    yInt = (-line[1]) - (lineSlope * line[0])
                    divisorForYBetweenDots = 100
                    for mult in range(divisorForYBetweenDots):
                        currY = int(height*(mult/divisorForYBetweenDots))
                        p1 = [((-currY-yInt)/lineSlope) + width//2, currY]
                        if line[0] > line[2]:
                            minX = line[2]
                            maxX = line[0]
                        else:
                            minX = line[0] + width//2
                            maxX = line[2] + width//2
                        if p1[0] > minX and p1[0] < maxX:
                            allp2.append(p1)
                            #allp2.append(p2)
                            points2x.append(p1[0])
                            #points1x.append(p2[0])
                            points2y.append(p1[1])
                            #points1y.append(p2[1])
        retPoints2x = points2x
        retPoints2y = points2y
        if secondPoints is not None:
            points2x = points2x + points2x + points2x
            points2y = points2y + points2y + points2y
            for pointSet in secondPoints:
                secondPointsCurr = pointSet[1]
                points2x = points2x + secondPointsCurr[0]
                points2y = points2y + secondPointsCurr[1]

        if len(points2x) > 0:
            a, b, c = np.polyfit(points2y, points2x, 2)
            lastGoodLine2 = [a, b, c]
            allowedLength = height // 3
            for y in range(height):
                y = height - y
                x = int(a * (y * y) + b * y + c)
                cv2.circle(origImg, (x, y), 5, (0,0,255))
                bottomPointFound = bottomPointFound - 1
                #if x < height:
                allowedLength = allowedLength - 1
                if bottomPointFound == 0:
                    polyPoints.append([x, y])
                    bottomPointFound = 10
                if allowedLength == 0:
                    print(bottomPointFound)
                    polyPoints.append([x, y])
                    break
    print(lastGoodLine2 is not None and len(points2x) <= 0)
    if lastGoodLine2 is not None and len(points2x) <= 0:
        print("Drawing old line")
        a = lastGoodLine2[0]
        b = lastGoodLine2[1]
        c = lastGoodLine2[2]
        allowedLength = height // 3
        for y in range(height):
            y = height - y
            x = int(a * (y * y) + b * y + c)
            cv2.circle(origImg, (x, y), 5, (0, 0, 255))
            bottomPointFound = bottomPointFound - 1
            allowedLength = allowedLength - 1
            if bottomPointFound == 0:
                polyPoints.append([x, y])
                bottomPointFound = 10
            if allowedLength == 0:
                polyPoints.append([x, y])
                break
    if len(polyPoints) > 4:
        # temp = polyPoints[2]
        # polyPoints[2] = polyPoints[3]
        # polyPoints[3] = temp
        polyPoints = np.array(polyPoints)
        newImg = np.zeros_like(origImg)
        cv2.fillPoly(newImg, [polyPoints], (0,255,0))
        origImg = cv2.addWeighted(origImg, 1, newImg, 0.4, 0)
    if len(allp1) > 0:
        for p1 in allp1:
            lineImg = cv2.circle(lineImg, (int(p1[0]), int(p1[1])), 10, (255,0,0), thickness= -1)
    if len(allp2) > 0:
        for p2 in allp2:
            lineImg = cv2.circle(lineImg, (int(p2[0]), int(p2[1])), 10, (255, 0, 0), thickness=-1)
    return origImg, lastGoodLine1, lastGoodLine2, lineImg, [[retPoints1x, retPoints1y], [retPoints2x, retPoints2y]]
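The overlay logic above, reduced to its essentials: fit x as a quadratic in y for each lane boundary (np.polyfit with y as the independent variable, as the function does), sample both curves over the lower part of the frame, and alpha-blend a filled polygon. draw_lane and the sampling range are illustrative; the 0.4 alpha mirrors the function:

import cv2
import numpy as np

def draw_lane(origImg, left_y, left_x, right_y, right_x, alpha=0.4):
    h = origImg.shape[0]
    ys = np.arange(h * 2 // 3, h)                # sample the bottom third of the frame
    la, lb, lc = np.polyfit(left_y, left_x, 2)   # x = a*y^2 + b*y + c
    ra, rb, rc = np.polyfit(right_y, right_x, 2)
    lxs = (la * ys ** 2 + lb * ys + lc).astype(np.int32)
    rxs = (ra * ys ** 2 + rb * ys + rc).astype(np.int32)
    # left boundary top-to-bottom, right boundary bottom-to-top -> closed polygon
    poly = np.vstack([np.column_stack([lxs, ys]),
                      np.column_stack([rxs, ys])[::-1]]).astype(np.int32)
    overlay = np.zeros_like(origImg)
    cv2.fillPoly(overlay, [poly], (0, 255, 0))
    return cv2.addWeighted(origImg, 1, overlay, alpha, 0)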