def extract_background_values():
    '''Extract initial background values from initial_im_set.'''
    _, _, im_num = co.data.initial_im_set.shape
    valid_freq = np.zeros(co.data.initial_im_set[:, :, 0].shape)
    co.meas.background = np.zeros(co.data.initial_im_set[:, :, 0].shape)
    initial_nonzero_im_set = np.zeros(co.data.initial_im_set.shape)
    valid_values = np.zeros_like(co.meas.background)
    co.meas.all_positions = np.fliplr(cv2.findNonZero(np.ones_like(
        co.meas.background, dtype=np.uint8)).squeeze()).reshape(co.meas.background.shape + (2,))
    for count in range(im_num):
        valid_values[
            co.data.initial_im_set[:, :, count] > 0
        ] = co.data.initial_im_set[:, :, count][
            co.data.initial_im_set[:, :, count] > 0]
        co.meas.background = co.meas.background + \
            co.data.initial_im_set[:, :, count]
        valid_freq[co.data.initial_im_set[:, :, count] != 0] += 1

        initial_nonzero_im_set[:, :, count] += co.data.initial_im_set[
            :, :, count] != 0
    co.meas.valid_values = (valid_values * 255).astype(np.uint8)

    co.meas.trusty_pixels = (
        (valid_freq) == np.max(valid_freq)).astype(np.uint8)
    valid_freq[valid_freq == 0] = 1
    co.meas.background = co.meas.background / valid_freq

    return valid_freq, initial_nonzero_im_set, im_num
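A quick note on what cv2.findNonZero returns, since every example on this page leans on it. This is a minimal sketch (the array values are illustrative, not taken from any example here):

import cv2
import numpy as np

mask = np.zeros((4, 4), np.uint8)
mask[1, 2] = 255
pts = cv2.findNonZero(mask)   # shape (N, 1, 2), each point in (x, y) order
print(pts)                    # [[[2 1]]] -> column 2, row 1
print(cv2.findNonZero(np.zeros((4, 4), np.uint8)))  # None when no pixel is set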
Example #2
def find_train(whichCam):
    with CamCM(whichCam) as camera:
        firstFrame = None
        begin = time.time()
        while True:
            (grabbed, frame) = camera.read()
            if not grabbed:
                break
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (21, 21), 0)
            if firstFrame is None:
                firstFrame = gray
                continue
            frameDelta = cv2.absdiff(firstFrame, gray)
            thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
            lel = cv2.findNonZero(thresh)
            if lel is not None:
                if len(lel) > 200:
                    break
            if time.time() - begin > 3:
                return -1, -1
        x = 0
        y = 0
        try:
            for element in lel:
                y += element[0][0]
                x += element[0][1]
            y = y / len(lel)
            x = x / len(lel)
        except (TypeError, ZeroDivisionError):
            print("No reading from camera")
    return x, y
Example #3
def straighten_table(image):
    """Rotates a given image so that the football table is straight.

    In English:
        - Find the table and determine its corners
        - From corners, find lower long side of the table
        - Calculate rotation from the line and rotate the image
    """
    bw_image = find_blue(image)

    if DEBUG:
        cv2.imwrite('debug/found_blue.jpg', bw_image)

    non_zero_pixels = cv2.findNonZero(bw_image)

    rect = cv2.minAreaRect(non_zero_pixels)
    precise_corners = cv2.cv.BoxPoints(rect)
    corners = np.int0(np.around(precise_corners))

    if DEBUG:
        corners_im = draw_points(image, corners)
        cv2.imwrite('debug/found_corners.jpg', corners_im)

    # Find lowest long side of the table and straighten based on it
    lower_a, lower_b = find_lower_long_side(corners)

    if DEBUG:
        lower_line_im = draw_lines(image, [[lower_a, lower_b]])
        cv2.imwrite('debug/lower_long_side.jpg', lower_line_im)

    rotation = rad_to_deg(calculate_line_rotation(lower_a, lower_b))
    # Rotate based on the other end of the line
    rotated_image = rotate_image(image, rotation, rotation_point=lower_a)
    return rotated_image
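straighten_table uses the OpenCV 2.x binding cv2.cv.BoxPoints, which became cv2.boxPoints in OpenCV 3+. A version-tolerant sketch of that one call (assuming nothing else in the function needs to change):

# cv2.cv.BoxPoints (OpenCV 2.x) was renamed to cv2.boxPoints in OpenCV 3+
if hasattr(cv2, 'boxPoints'):
    precise_corners = cv2.boxPoints(rect)
else:
    precise_corners = cv2.cv.BoxPoints(rect)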
Example #4
 def getIslandPoints(self, islandImg):
     nonZeroCoord = cv2.findNonZero(islandImg)  # returns points as an ndarray of [x, y]
     # gets the center of mass and stores all the coords in a map
     for i in range(len(nonZeroCoord)):  # len() gives the point count; .size would overrun
         x = nonZeroCoord[i][0][0]
         y = nonZeroCoord[i][0][1]
         coords = str(y) + "," + str(x)
         if coords not in self.coordMap:  # dict.has_key() no longer exists in Python 3
             self.coordMap[coords] = (y, x)
Example #5
 def calculateWidth(self):
     if self.badSkeletonization:
         return
     # approximate width as 2*shortest path to contour at midpoint
     mp = np.flipud(self.toCroppedCoordinates(self.midpoint))
     self.outlineWorm()
     cpts = np.float64(cv2.findNonZero(np.uint8(self.outlinedWormImage)))
     self.width = (min(np.sqrt(np.sum(np.float64(cpts - mp)**2, axis=2)))
                   * 2.0 / self.videoRegion.imageProcessor.pixelSize)[0]
Example #6
def watershed(image, marker):
    m = marker.copy()
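    # cv2.watershed expects an 8-bit 3-channel image plus int32 markers,
    # and it mutates the marker array in place (hence the copy above)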
    cv2.watershed(image, m)
    m[m != 1] = 0
    m *= 255
    points = cv2.findNonZero(m.astype(np.uint8))
    bound_rect = cv2.boundingRect(points)
    # x, y, w, h = bound_rect
    return bound_rect
Example #7
def deskew(image, angle):
    print(angle)
    image = cv2.bitwise_not(image)
    non_zero_pixels = cv2.findNonZero(image)
    center, wh, theta = cv2.minAreaRect(non_zero_pixels)

    root_mat = cv2.getRotationMatrix2D(center, angle, 1)
    rows,cols = image.shape[:2]
    rotated = cv2.warpAffine(image, root_mat, (cols, rows), flags=cv2.INTER_CUBIC)
    return cv2.bitwise_not(cv2.getRectSubPix(rotated, (cols, rows), center))
Example #8
def neighbors(src, dstIdx, nbrs, d=5):
    srcIdx = cv2.findNonZero(src).reshape(-1, 2)
    # srcIdx = np.vstack(np.nonzero(src)[::-1]).T
    # import ipdb; ipdb.set_trace()
    length, _ = srcIdx.shape
    rndIdx = np.random.choice(length, 600)
    srcIdx = srcIdx[rndIdx]
    distances, indices = nbrs.kneighbors(srcIdx)
    idx = distances < d
    nKeys = dstIdx[indices[idx]]
    oKeys = srcIdx[idx.ravel()]
    return oKeys, nKeys
Example #9
    def color_contours(self, blob_img, contours):
        """
        Return a colored image where the regions within certain the contours
        is colored in.
        :param blob_image:
        :return:
        """
        labeled_img = np.zeros(blob_img.shape + (3, ), np.uint8)
        colors = ((0,0,255),(0,255,0),(255,0,0),(0,255,255),(255,0,255), (255, 255, 0))
        pnts_list = []
        mask_list = []
        for ind, contour in enumerate(contours):
            mask = np.zeros(blob_img.shape, np.uint8)
            cv2.drawContours(mask, [contour], 0, 255, -1, 8)
            pixel_points = cv2.findNonZero(mask)#(x,y)

            labeled_img[mask == 255] = colors[ind]
            pnts_list.append(pixel_points)
            mask_list.append(mask)

        angles = []
        # enumerate keeps pnts_list[k] aligned with its contour even when tiny ones are skipped
        for k, cnt in enumerate(contours):
            if len(cnt) < 10:
                #don't care about tiny contours
                # this should have already been protected for in the
                # large_contour code, but that is technically area
                angles.append(0)
                continue

            pixel_points = pnts_list[k]
            M = cv2.moments(cnt)#expects to get a contour - uses Green's theorem

            #center of blob
            cx = int(M['m10']/M['m00'])
            cy = int(M['m01']/M['m00'])

            #ellipsoid outline of blob
            ellipse = cv2.fitEllipse(cnt)

            (x, y), (MA, ma), angle = cv2.fitEllipse(pixel_points)#yet another way to get the angle
            angles.append(angle)

            #line fitting, THIS WAS SLOWING ME DOWN
            #DIST_L1 = 1: |x1-x2| + |y1-y2| */, DIST_L2 = 2: euclidean distance, DIST_C = : max(|x1-x2|,|y1-y2|)
            [vx, vy, x, y] = cv2.fitLine(pixel_points, 1, 0, 0.01, 0.01)
            pt1 = (np.array((x, y)) + 20*np.array((vx, vy))).astype('int32')
            pt2 = (np.array((x, y)) - 20*np.array((vx, vy))).astype('int32')
            cv2.line(labeled_img, tuple(pt1), tuple(pt2), (0, 128, 128), 2, 8)

        return labeled_img, angles
Example #10
def IdHullOne(frame1,frame2):
    # calculate frame difference and erode background
    frameAbsDiff = cv2.absdiff(frame1,frame2)
    retval, threshMask = cv2.threshold(frameAbsDiff,25,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    #threshMask = cv2.dilate(threshMask, None, iterations = 4)    
    erodeMask = cv2.erode(threshMask, None, iterations = 2)
    # locate remaining pixels and calculate encompassing convex polygon
    whitePixels = cv2.findNonZero(erodeMask)
    whitePixels = whitePixels[:,0]
    hullPolygon = cv2.convexHull(whitePixels)
    hullPolygon = hullPolygon[:,0]
    
    return hullPolygon, frameAbsDiff
Example #11
 def final_fitting(c,edges):
     #use the real edge pixels to fit, not the approximated contours
     support_mask = np.zeros(edges.shape,edges.dtype)
     cv2.polylines(support_mask,c,isClosed=False,color=(255,255,255),thickness=2)
     # draw into the support mask with thickness 2
     new_edges = cv2.min(edges, support_mask)
     new_contours = cv2.findNonZero(new_edges)
     if self._window and visualize:
         new_edges[new_edges!=0] = 255
         overlay[:,:,1] = cv2.max(overlay[:,:,1], new_edges)
         overlay[:,:,2] = cv2.max(overlay[:,:,2], new_edges)
     new_e = cv2.fitEllipse(new_contours)
     return new_e,new_contours
Example #12
def colorSegment(input, color, threshold = 5):


    color_filter = initColor()

    original = input.copy()

    input = cv2.GaussianBlur(input, (7, 7), 1)

    hsv = cv2.cvtColor(input, cv2.COLOR_BGR2HSV)

    color_hsv = color_filter[color]

    masks = []
    for c in color_hsv:
        lower = np.array(c[0])
        upper = np.array(c[1])
        m = cv2.inRange(hsv, lower, upper)
        masks.append(m)

    # combine the per-range masks; bitwise_or avoids uint8 wrap-around from '+'
    # (assumes at least one HSV range per color)
    mask = masks[0]
    for m in masks[1:]:
        mask = cv2.bitwise_or(mask, m)


    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (threshold, threshold))
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    res = cv2.bitwise_and(input,input, mask= mask)


    pixels = cv2.findNonZero(mask)

    if pixels is None:
        return input, np.zeros_like(original)

    # findNonZero points are (x, y): rows are indexed by y, columns by x
    top = min(pixels, key=lambda p: p[0][1])[0][1]
    bottom = max(pixels, key=lambda p: p[0][1])[0][1]
    left = min(pixels, key=lambda p: p[0][0])[0][0]
    right = max(pixels, key=lambda p: p[0][0])[0][0]


    crop = original[top:bottom, left:right]


    return original, mask
Example #13
def f2fEst(prev, curr):
    prevPts = cv2.findNonZero(prev).reshape(-1, 2)
    currPts = cv2.findNonZero(curr).reshape(-1, 2)

    nbrs = NearestNeighbors(
        n_neighbors=1, radius=1.0, algorithm='auto').fit(prevPts)
    length, _ = currPts.shape
    rndIdx = np.random.choice(length, 1000)
    currPts = currPts[rndIdx]
    distances, indices = nbrs.kneighbors(currPts)
    idx = distances < 50
    oKeys = prevPts[indices[idx]]
    nKeys = currPts[idx.ravel()]
    print(len(prevPts), len(currPts), len(nKeys))
    M, mask = cv2.findHomography(oKeys.reshape(-1, 1, 2),
                                 nKeys.reshape(-1, 1, 2),
                                 cv2.RANSAC, 5.0)

    if mask is not None and np.sum(mask) > 200:
        matchesMask = mask.ravel().tolist()
        return oKeys, nKeys, matchesMask, M
    else:
        return [], [], [], None
Example #14
def tight_crop(image):
    """Produce a tightly-cropped version of the image, and add alpha channel if needed

    :param image: PIL image
    :return: PIL image
    """
    if image.mode != 'RGBA':
        image = image.convert('RGBA')

    alpha = np.array(image)[:,:,3]
    nonzero_points = cv2.findNonZero(alpha)
    x, y, w, h = cv2.boundingRect(nonzero_points)
    cropped_image = image.crop((x, y, x+w, y+h))
    return cropped_image
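A hedged usage sketch for tight_crop (the file names are placeholders, not from the original):

from PIL import Image

sprite = Image.open('sprite.png')  # hypothetical input file
tight_crop(sprite).save('sprite_cropped.png')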
Example #15
def remove_empty_space(img, pre_crop=0):
    # img = cv2.imread('pg13_gau_preview.png')
    if pre_crop > 0:
        img = img[:-pre_crop, :-pre_crop]  # Perform pre-cropping
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    gray = 255 * (gray < 128).astype(np.uint8)  # To invert the text to white
    gray = cv2.morphologyEx(gray, cv2.MORPH_OPEN,
                            np.ones((2, 2),
                                    dtype=np.uint8))  # Perform noise filtering
    coords = cv2.findNonZero(gray)  # Find all non-zero points (text)
    x, y, w, h = cv2.boundingRect(coords)  # Find minimum spanning bounding box
    rect = img[y:y + h, x:x + w]  # Crop the image - note we do this on the original image
    return rect
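A hedged usage sketch for remove_empty_space (file names are placeholders):

img = cv2.imread('page.png')  # hypothetical scanned page
trimmed = remove_empty_space(img, pre_crop=2)
cv2.imwrite('page_trimmed.png', trimmed)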
Example #16
def find_horizontal_lines(invert_img):
    nonzero_score_thresh = 0.80
    highest = 0
    lowest = None
    for i, line in enumerate(invert_img):
        nonzero = cv2.findNonZero(line)
        if nonzero is None:
            nonzero = []
        nonzero_score = len(nonzero) / len(line)
        if nonzero_score > nonzero_score_thresh:
            if lowest is None:
                lowest = i
            highest = i
    return lowest, highest
Example #17
def cont(ps, j, point, C):

    nonzero = cv2.findNonZero(ps)
    distances = np.sqrt((nonzero[:, :, 0] - point[1])**2 +
                        (nonzero[:, :, 1] - point[0])**2)
    nearest_index = np.argmin(distances)
    x, y = nonzero[nearest_index][0]

    cont, _ = cv2.findContours(ps, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    for c in cont:
        if (cv2.pointPolygonTest(c, (x, y), True) >= 0):
            cv2.drawContours(j, c, -1, (255, 255, 255), 4)
            cv2.drawContours(C, c, -1, (255, 255, 255), 1)
            return c
Example #18
def routine_select_ref_angle(bin_side_images):

    max_len = 0
    max_angle = None
    
    for angle in bin_side_images:
    
        x_pos, y_pos, x_len, y_len = cv2.boundingRect(cv2.findNonZero(bin_side_images[angle]))

        if x_len > max_len:
            max_len = x_len
            max_angle = angle

    return max_angle
Example #19
 def getCorner(self, block):
     points = cv2.findNonZero(block)
     x, y, w, h = cv2.boundingRect(points)
     top_right = (x + w - 1, y + h - 1)
     top_left = (x, y + h - 1)
     bottom_right = (x + w - 1, y)
     bottom_left = (x, y)
     corner = {
         "top_left": top_left,
         'bottom_left': bottom_left,
         'top_right': top_right,
         'bottom_right': bottom_right
     }
     return corner
Example #20
def fit_line_to_points(img):
    """Fits a regression line to the non-zero pixels in a image"""
    y_max = img.shape[0]
    pixels = cv2.findNonZero(img)
    xs, ys = split_XYs_out_from_pixels(pixels)

    # x = m*y + b, y as function of x because we want to draw a line from bottom
    # of image to the middle (so y is the input)
    m, b, r_value, p_value, std_err = scipy.stats.linregress(ys, xs)

    x1 = m * y_max + b
    x2 = m * y_max * 0.60 + b

    return int(x1), int(y_max), int(x2), int(y_max * 0.60), abs(r_value)
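fit_line_to_points relies on a split_XYs_out_from_pixels helper that is not shown on this page. A plausible implementation, under the assumption that it simply unpacks the findNonZero output (a guess, not the original helper):

def split_XYs_out_from_pixels(pixels):
    # cv2.findNonZero returns shape (N, 1, 2) points in (x, y) order
    pts = pixels.reshape(-1, 2)
    return pts[:, 0], pts[:, 1]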
Example #22
def detect_color(videoinput, lower_bound, higher_bound, axis = 1): 
    lower_bound = np.array(lower_bound)
    higher_bound = np.array(higher_bound)
    hsv = cv2.cvtColor(videoinput, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower_bound, higher_bound)
    points = cv2.findNonZero(mask)
    result = []
    if points is None:
        return result
    for point in points:
        x, y = point[0][0], point[0][1]
        result.append([int(x), int(y)])
    return result
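A hedged usage sketch for detect_color: averaging the returned points gives a rough centroid of the colored region (the HSV range below is an arbitrary green band, not from the original):

pts = detect_color(frame, [40, 70, 70], [80, 255, 255])  # frame: hypothetical BGR image
if pts:
    cx = sum(p[0] for p in pts) / len(pts)
    cy = sum(p[1] for p in pts) / len(pts)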
Example #23
	def getDistance(self):
		#function to return distance of ball
		ret,frame = cap.read()
		self.maskBall = cv2.inRange(frame,self.colorLowBall,self.colorHighBall)
		self.maskEnd1 = cv2.inRange(frame,self.colorLowEnd1,self.colorHighEnd1)
		self.maskEnd2 = cv2.inRange(frame,self.colorLowEnd2,self.colorHighEnd2)
		
		self.CoordinatesBall = cv2.findNonZero(self.maskBall)
		self.CoordinatesEnd1 = cv2.findNonZero(self.maskEnd1)
		self.CoordinatesEnd2 = cv2.findNonZero(self.maskEnd2)
		
		#Coordinates of Ball
		if (len(np.shape(self.CoordinatesBall)) > 0):
			self.ballExists = True
			self.xBall = np.average(np.transpose(self.CoordinatesBall)[0][0])
			self.yBall = np.average(np.transpose(self.CoordinatesBall)[1][0])
		else:
			self.ballExists = False	
		
		#Coordinates of End 1
		if (len(np.shape(self.CoordinatesEnd1)) > 0):
			self.end1Exists = True	
			self.xEnd1 = np.average(np.transpose(self.CoordinatesEnd1)[0][0])
			self.yEnd1 = np.average(np.transpose(self.CoordinatesEnd1)[1][0])
		else:
			self.end1Exists = False
		
		#Coordinates of End 2
		if (len(np.shape(self.CoordinatesEnd2)) > 0):
			self.end2Exists = True	
			self.xEnd2 = np.average(np.transpose(self.CoordinatesEnd2)[0][0])
			self.yEnd2 = np.average(np.transpose(self.CoordinatesEnd2)[1][0])
		else:
			self.end2Exists = False
		
		if (self.ballExists and self.end1Exists and self.end2Exists):
			return (self.euclideanDistance(self.xBall,self.yBall,self.xEnd2,self.yEnd2)/self.euclideanDistance(self.xEnd2,self.yEnd2,self.xEnd1,self.yEnd1)) * 40 
Example #24
def find_isaac(img):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # # Set upper and lower bound of Hue, Saturation and Value using the tracking window
    # l_h = cv2.getTrackbarPos("LH", "Tracking")
    # l_s = cv2.getTrackbarPos("LS", "Tracking")
    # l_v = cv2.getTrackbarPos("LV", "Tracking")
    # u_h = cv2.getTrackbarPos("UH", "Tracking")
    # u_s = cv2.getTrackbarPos("US", "Tracking")
    # u_v = cv2.getTrackbarPos("UV", "Tracking")
    # l_c = np.array([l_h, l_s, l_v])
    # u_c = np.array([u_h, u_s, u_v])

    l_c = np.array([0, 63, 167])
    u_c = np.array([0, 65, 208])

    # Create a mask using the upper and lower color values
    mask = cv2.inRange(hsv, l_c, u_c)

    # Put the mask over the original image to show only isaac
    res_img = cv2.bitwise_and(img, img, mask=mask)

    # Convert to grayscale so everything except isaac becomes black
    res_img = cv2.cvtColor(res_img, cv2.COLOR_BGR2GRAY)
    # res_img = 255-res_img

    # Find isaac in image by checking non-zero pixels
    isaac_img = cv2.findNonZero(res_img)
    if isaac_img is not None:
        # Find top, bottom, left and right edges of isaac
        left = isaac_img[:, 0, 0].min()
        right = isaac_img[:, 0, 0].max()
        top = isaac_img[:, 0, 1].min()
        bottom = isaac_img[:, 0, 1].max()

        # Find center of isaac by averaging edges
        center_x = (left + right) / 2
        center_y = (top + bottom) / 2

        # print("Isaac center: {}, {}".format(center_x, center_y))

        if show_windows:
            cv2.imshow("OpenCV/Numpy HSV", hsv)
            cv2.imshow("OpenCV/Numpy Result", res_img)

        return [center_x, center_y]
    else:
        return None
Example #25
 def run(self):
     #self.gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
     ret, img = cv2.threshold(cv2.cvtColor(self.frame, cv2.COLOR_GRAY2BGR),
                              180, 255, cv2.THRESH_BINARY)
     sample = cv2.findNonZero(cv2.cvtColor(img,
                                           cv2.COLOR_BGR2GRAY)).reshape(
                                               -1, 2)
     print(len(sample))
     bsas_instance = bsas(sample, self.max_clusters, self.threshold)
     bsas_instance.process()
     clusters = bsas_instance.get_clusters()
     representatives = bsas_instance.get_representatives()
     self.cls = []
     self.bnd = []
     for cluster in clusters:
         sum = np.array([0, 0], dtype=np.float32)
         pts = []
         for i in cluster:
             pts.append(list(sample[i]))
             sum += sample[i]
         cls_pts = np.vstack(
             (pts[np.where(
                 np.array(pts)[:, 0] == np.max(np.array(pts)[:, 0]))[0][0]],
              pts[np.where(
                  np.array(pts)[:, 0] == np.min(np.array(pts)[:,
                                                              0]))[0][0]],
              pts[np.where(
                  np.array(pts)[:, 1] == np.max(np.array(pts)[:,
                                                              1]))[0][0]],
              pts[np.where(
                  np.array(pts)[:, 1] == np.min(np.array(pts)[:,
                                                              1]))[0][0]]))
         self.bnd.append([
             np.min(cls_pts[:, 0]),
             np.min(cls_pts[:, 1]),
             np.max(cls_pts[:, 0]) - np.min(cls_pts[:, 0]),
             np.max(cls_pts[:, 1]) - np.min(cls_pts[:, 1])
         ])
         sum = sum / len(cluster)
         self.cls.append(sum)
     self.cls = np.array(self.cls).reshape(-1, 2)
     print(len(self.cls))
     #self.log.append(self.LED.reshape(1, -1))
     if self.camCounter > 1000:
         self.videoStream.release()
         #np.savetxt('log.csv', self.log)
     #else:
     #self.videoStream.write(cv2.cvtColor(self.frame, cv2.COLOR_GRAY2BGR))
     self.camCounter = self.camCounter + 1
Example #26
def run(iteration: int, img: np.ndarray, data: Dict[str, Any],
        global_data: Dict[str, Any]) -> (np.ndarray, bool):
    retval, labels, stats, centroids = global_data["connected"]["connected"]
    original = img

    if "canny" in global_data:
        original = global_data["canny"]["input"]

    img = np.copy(img)

    if len(img.shape) == 2:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

    groups = {}  # type: Dict[int, np.ndarray]
    for group_id in range(1, retval):
        mask = np.zeros(labels.shape, np.uint8)
        mask[labels == group_id] = 255
        groups[group_id] = cv2.findNonZero(mask)

    midpoints = []
    for group_id, group in groups.items():
        other_groups = None  # type: np.ndarray
        for other_group_id, other_group in groups.items():
            if group_id == other_group_id:
                continue
            if other_groups is None:
                other_groups = other_group
            else:
                other_groups = np.concatenate((other_groups, other_group))

        # From step 3 of Michael's code
        for pixel in group:
            distances = np.sqrt((other_groups[:, :, 0] - pixel[0][0])**2 +
                                (other_groups[:, :, 1] - pixel[0][1])**2)
            nearest_index = np.argmin(distances)
            nearest_inner_pixel = other_groups[nearest_index]
            midpointX = int((pixel[0][0] + nearest_inner_pixel[0][0]) / 2)
            midpointY = int((pixel[0][1] + nearest_inner_pixel[0][1]) / 2)
            midpoint = (midpointX, midpointY)
            if midpoint == (nearest_inner_pixel[0][0],
                            nearest_inner_pixel[0][1]):
                continue
            cv2.circle(img, midpoint, 1, [0, 0, 255], -1)
            midpoints.append(midpoint)

    data["midpoints"] = midpoints
    data["img"] = img
    global_data["michael"] = data
    return original, True
Example #27
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-i",
                        "--image",
                        required=True,
                        help="path to input image file")
    args = vars(parser.parse_args())

    # load the image from disk
    image = cv.imread(cv.samples.findFile(args["image"]))
    if image is None:
        print("can't read image " + args["image"])
        sys.exit(-1)
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)

    # threshold the image, setting all foreground pixels to
    # 255 and all background pixels to 0
    thresh = cv.threshold(gray, 0, 255,
                          cv.THRESH_BINARY_INV | cv.THRESH_OTSU)[1]

    # Applying erode filter to remove random noise
    erosion_size = 1
    element = cv.getStructuringElement(
        cv.MORPH_RECT, (2 * erosion_size + 1, 2 * erosion_size + 1),
        (erosion_size, erosion_size))
    thresh = cv.erode(thresh, element)

    coords = cv.findNonZero(thresh)
    angle = cv.minAreaRect(coords)[-1]
    # the `cv.minAreaRect` function returns values in the
    # range [-90, 0) if the angle is less than -45 we need to add 90 to it
    if angle < -45:
        angle = (90 + angle)

    (h, w) = image.shape[:2]
    center = (w // 2, h // 2)
    M = cv.getRotationMatrix2D(center, angle, 1.0)
    rotated = cv.warpAffine(image,
                            M, (w, h),
                            flags=cv.INTER_CUBIC,
                            borderMode=cv.BORDER_REPLICATE)
    cv.putText(rotated, "Angle: {:.2f} degrees".format(angle), (10, 30),
               cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

    # show the output image
    print("[INFO] angle: {:.2f}".format(angle))
    cv.imshow("Input", image)
    cv.imshow("Rotated", rotated)
    cv.waitKey(0)
Example #28
def main():
    '''
    This code crops the board.
    '''
    print(__doc__)

    try:
        fn = sys.argv[1]
    except IndexError:
        fn = "board.jpg"
    source = cv.imread(fn)
    print(source.shape)
    #Blur the image
    blur = cv.medianBlur(source, 5)
    blur = source
    #make a HSV image from the blurred picture
    hsv = cv.cvtColor(blur, cv.COLOR_BGR2HSV)
    #HSV. H: Hue, S: Saturation, V: Value
    #lower_blue lowest values.  H:104, S: 15, V: 90
    #upper_blue highest values  H:150, S:255, V:255
    lower_blue = np.array([104, 15, 90])
    upper_blue = np.array([150, 255, 255])

    mask_blue = cv.inRange(hsv, lower_blue, upper_blue)
    res_blue = cv.bitwise_and(source, source, mask=cv.bitwise_not(mask_blue))
    #cv.imshow("source", source)
    #cv.imshow("mask", mask_blue)
    cv.imshow("res", res_blue)
    #Use of nonzero to get 0 and 1 values. To get the start of the board.
    nonzero = cv.findNonZero(mask_blue)
    start = start_board(nonzero, 10)
    x_low = nonzero[start][0][0]
    y_low = nonzero[start][0][1]
    x_high = nonzero[(len(nonzero) - 1)][0][0]
    y_high = nonzero[(len(nonzero) - 1)][0][1]
    #Crop the img to the desired format. First you give the lowest y coordinate then the end y. Same for x.
    crop_source = source[y_low:y_high, x_low:x_high]
    crop_mask_blue = mask_blue[y_low:y_high, x_low:x_high]
    #Show the images.
    cv.imshow("crop", crop_source)
    cv.imshow("Crop Mask Blue", crop_mask_blue)
    cv.imwrite("cropped.jpg", crop_source)
    cv.imwrite("cropped_mask.jpg", crop_mask_blue)
    #the end -----------------------------------------------
    while True:
        k = cv.waitKey(5) & 0xFF
        if k == 27:
            break
    cv.destroyAllWindows()
Example #29
def get_highgrad_element(img, threshold=100):
    '''
    Finds high-gradient areas in the image.

    Arguments:
        img: Input image
        threshold: Binarization threshold applied to the Laplacian

    Returns:
        u: Array of pixel locations, or None if no pixel passes the threshold
    '''

    laplacian = cv2.Laplacian(img, cv2.CV_8U)
    ret, thresh = cv2.threshold(laplacian, threshold, 255, cv2.THRESH_BINARY)
    u = cv2.findNonZero(thresh)
    return u
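A hedged usage sketch (the file name is a placeholder); note the function returns None when no pixel exceeds the threshold:

img = cv2.imread('frame.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input
pts = get_highgrad_element(img, threshold=100)  # (N, 1, 2) array of (x, y), or None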
Example #30
def deskew(data, img):
    ## Find MinArea for Rotation
    pts = cv2.findNonZero(data)
    ret = cv2.minAreaRect(pts)

    (cx, cy), (w, h), ang = ret
    if w > h:
        w, h = h, w
        ang += 90

    ## Find Matrix and do Rotation
    M = cv2.getRotationMatrix2D((cx, cy), ang, 1.0)
    data = cv2.warpAffine(data, M, (img.shape[1], img.shape[0]))
    result = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]))
    return (data, result)
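A hedged usage sketch for this deskew variant, assuming the binary mask comes from a simple inverted Otsu threshold (an assumption, not part of the original):

img = cv2.imread('scan.png')  # hypothetical input
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, mask = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
mask, img = deskew(mask, img)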
Example #32
    def deskewing(cls, image):
        # _, image = cv2.threshold(image, 200, 255, cv2.THRESH_BINARY_INV)
        angle = cls.compute_skew(image)
        print(angle)
        angle = np.math.degrees(angle)
        # image = cv2.bitwise_not(image)
        non_zero_pixels = cv2.findNonZero(image)
        center, wh, theta = cv2.minAreaRect(non_zero_pixels)

        root_mat = cv2.getRotationMatrix2D(center, angle, 1)
        rows, cols = image.shape
        rotated = cv2.warpAffine(image, root_mat, (cols, rows), flags=cv2.INTER_CUBIC,
                                 borderMode=cv2.BORDER_CONSTANT, borderValue=(255, 255, 255))

        return cv2.getRectSubPix(rotated, (cols, rows), center)
Example #33
 def __init__(self, image):
     self.image = image
     self.point_set = cv2.findNonZero(self.image)
     offsets = (self.point_set.max(0) - self.point_set.min(0)) / 2
     self.point_set -= offsets
     self.initial_box_size = int(math.ceil(ROTATABLE_FACTOR * (
         self.point_set.max(0) - self.point_set.min(0)).max()))
     s = (self.point_set.max(0) - self.point_set.min(0)).max() / 2
     mpl.rcParams['toolbar'] = 'None'
     self.figure, self.ax = plt.subplots()
     self.ax.plot([-s, -s, s, s, -s], [-s, s, s, -s, -s])
     self.lines_pts, = self.ax.plot([], [], '.', color='gray', alpha=0.5)
     self.lines_pts2, = self.ax.plot([], [], '.', color='red')
     plt.axis('equal')
     self.patches = []
Example #34
    def findEnemiesOnMiniMap(self):
        # Scan the minimap for red dots and return the angle of the closest enemy (or None if there is none)
        im = auto.screenshot(region=(1720,100,120,80))
        r, g, b = cv2.split(asarray(im))
        ret,thresh1 = cv2.threshold(r,200,255,cv2.THRESH_BINARY)

        # Erase the hero symbol on the minimap:
        thresh1[15:65,35:85] = np.zeros((50,50))

        # If there is an enemy nearby:
        if np.max(thresh1) == 255:

            # Find Closest Enemy
            point = self.find_nearest_white(thresh1,(40,60))
            point = point[0] 

            # Convert point to center's map relative angle
            teta = self.angleOfPoint(point,(60,40))

            return teta

        # Otherwise return None
        return None
Example #35
def avgColor(frame):
    # print("here")
    # dimension - # of rows
    width = frame.shape[0] / 2
    dpth = r.getDepth()
    count = 0
    flag = False

    # get list of all non-zero pixels and average
    count = 0
    sum = 0
    target_loc = cv2.findNonZero(frame)
    depthSum = 0
    #r = Random()

    if target_loc is None:
        return -1

    # print(type(target_loc))
    # print("here2 \n")

    if isinstance(target_loc, int):
        return -1

    for x in target_loc:
        row = x[0][1]
        col = x[0][0]
        count += 1

        sum += x[0][0]
        depthSum += dpth[row][col]

    # for i in range(0, 5):
    #     rand = r.

    # calc average
    avg = sum / count
    # if not nan check and exit if close
    print(depthSum / count)
    if not math.isnan(depthSum / count) and depthSum / count != 0:
        if depthSum / count < 250:
            print("arrived! distance: " + str(depthSum / count))
            r.drive(angSpeed=0, linSpeed=0)
            exit(0)

    # if no pixels in frame, ret -1
    # else return avg x coordinate - width
    return avg - width, count
Example #36
def processFrame(firstFrame,
                 undistorted,
                 K_inv,
                 upperRect,
                 lowerRect,
                 upperPlane,
                 lowerPlane,
                 debug=False):
    # Prepare the image for processing (thresholding) and find points
    hsv = cv2.cvtColor(undistorted, cv2.COLOR_BGR2HSV)
    inRange = cv2.inRange(hsv, hsvMin, hsvMax)
    final = cv2.morphologyEx(inRange, cv2.MORPH_OPEN, kernel4)
    laserPts = cv2.findNonZero(final)

    # DEBUG INFO
    if debug:
        if laserPts is not None:
            for p in laserPts:
                cv2.circle(undistorted, (p[0][0], p[0][1]), 1, (0, 0, 255))
        cv2.imshow('undistorted', undistorted)

    # Find reference points on desk and wall
    upper3DPoints, upperImgPoints = findReference3DPoints(
        final, upperRect, upperPlane, K_inv)
    lower3DPoints, lowerImgPoints = findReference3DPoints(
        final, lowerRect, lowerPlane, K_inv)

    # Then fit a plane if we have enough points
    if upper3DPoints is not None and lower3DPoints is not None:
        # Find the corrisponding laser plane
        referencePoints = np.array(upper3DPoints + lower3DPoints)
        laserPlane = fitPlane(referencePoints)

        # Find 3D points with line-plane intersection
        homoImgPoints = np.hstack((
            laserPts[:, 0],
            np.ones(laserPts.shape[0]).reshape(-1, 1),
        ))
        rays = createRays(homoImgPoints, K_inv)
        points3D = [linePlaneIntersection(laserPlane, ray) for ray in rays]

        # Recover colors for points from first frame
        x = laserPts.squeeze(1)
        colors = np.flip(firstFrame[x[:, 1], x[:, 0]].astype(np.float64) /
                         255.0,
                         axis=1)
        return points3D, colors, laserPlane
    return None, None, None
Example #37
def drive():
    from pynput import keyboard
    global driving, pressed
    start = time.time()
    keyboardActuator = keyboard.Controller()
    driving = True
    previous_mean = 0
    while driving:
        time_now = time.time() - start
        if time_now > 250:
            ReleaseKeyPynput(W)
            driving = False
            pressed = False
            print("Driving over")
        array = np.array(pyautogui.screenshot())
        crop_img = array[400:450, 600:1600]
        converted = cv.cvtColor(crop_img, cv.COLOR_RGB2BGR)
        cv.imshow("image", converted)
        cv.waitKey()
        mask = cv.inRange(converted, lower_color_bounds, upper_color_bounds)
        pixelpoints = cv.findNonZero(mask)
        if pixelpoints is None:
            ReleaseKeyPynput(A)
            ReleaseKeyPynput(D)
            continue
        xs = [pixelpoint[0][0] for pixelpoint in pixelpoints]
        mean = min(xs, key=lambda x: abs(x - mean_ideal))
        added_time = 0
        print(mean)
        #if previous_mean == 0:
        #    previous_mean = mean
        #else:
        #    added_time = (previous_mean - mean) / 10000
        if mean < mean_max:
            PressKeyPynput(A)
            time.sleep(abs(0.01 + abs((mean - mean_ideal) / 5000) +
                           added_time))
            ReleaseKeyPynput(A)

        elif mean > mean_min:
            PressKeyPynput(D)
            time.sleep(abs(0.01 + abs((mean - mean_ideal) / 5400) +
                           added_time))
            ReleaseKeyPynput(D)

        else:
            ReleaseKeyPynput(A)
            ReleaseKeyPynput(D)
Example #38
    def crop_and_save(self):
        # Read in the image and convert to grayscale
        img = cv2.imread(self.image_path)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # To invert the text to white
        gray = 255 * (gray < 128).astype(np.uint8)
        coords = cv2.findNonZero(gray)  # Find all non-zero points (text)
        # Find minimum spanning bounding box
        x, y, w, h = cv2.boundingRect(coords)
        # Crop the image - note we do this on the original image
        if ((img.shape[0] > (y + h + 10)) and (img.shape[1] > (x + w + 10))):
            rect = img[y:y + h + 10, x:x + w + 10]
        else:
            rect = img[y:y + h, x:x + w]

        cv2.imwrite(self.image_path, rect)
Example #39
def get_mask(image):
    result = image.copy()
    image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

    lower = np.array([50, 100, 100])
    upper = np.array([140, 255, 255])
    mask = cv2.inRange(image, lower, upper)

    #find bounds of a symbol and crop it
    points = cv2.findNonZero(mask)
    x, y, w, h = cv2.boundingRect(points)
    crop_img = mask[y:y + h, x:x + w]

    result = cv2.bitwise_and(result, result, mask=mask)

    return crop_img
Example #40
    def correction_of_rotation(self, image):
        '''
            Correct the rotation of a binary image using the minimum-area
            rectangle around its non-zero pixels.
        '''
        non_zero_pixels = cv2.findNonZero(image)
        center, wh, theta = cv2.minAreaRect(non_zero_pixels)

        if wh[0] > wh[1]:
            wh = (wh[1], wh[0])
            theta += 90

        root_matrix = cv2.getRotationMatrix2D(center, theta, 1)
        h, w = image.shape
        rotated_image = cv2.warpAffine(image, root_matrix, (w, h), flags=cv2.INTER_CUBIC)

        return cv2.getRectSubPix(rotated_image, (w, h), center)
Example #41
def make_labeled_image(img_gray, contours):
    #np.random.seed(2222)

    labeled_img = np.zeros(img_gray.shape + (3, ), np.uint8)
    pnts_list = []
    for cnt in contours:
        color = (255. * np.random.rand(3, )).astype('uint8')

        mask = np.zeros(img_gray.shape, np.uint8)  # img_gray, not the undefined img_open_close
        cv2.drawContours(mask, [cnt], 0, 255, -1, 8)
        pixel_points = cv2.findNonZero(mask)  #(x,y)

        labeled_img[mask == 255] = color
        pnts_list.append(pixel_points)  #Who knows? You might need this

    return labeled_img, pnts_list
Example #42
def ImageContoursCustomSet2(img, isTesting=False):
    try:
        cimg2 = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        gray = cv2.cvtColor(cimg2, cv2.COLOR_BGR2GRAY)
    except:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    gray = 255 - gray
    cv2.imshow('', gray)
    cv2.waitKey(2020202)
    # _,cnts,_=cv2.findContours(gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cv2.findNonZero(gray)
    lstcont = []
    for i in cnts:
        lstcont.append([i[0, 0], i[0, 1]])
    return lstcont
Example #44
def rotate(image):

    # minAreaRect on the nozeros
    pts = cv2.findNonZero(image)
    ret = cv2.minAreaRect(pts)

    (cx, cy), (w, h), ang = ret
    if w > h:
        w, h = h, w
        ang += 90

    # Find rotated matrix, do rotation
    M = cv2.getRotationMatrix2D((cx, cy), ang, 1.0)
    rotated = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))

    return rotated
Example #45
 def skeletonizeWorm(self):
     self.skeletonizedWormImage = morphology.skeletonize(self.bwWormImage)
     skeletonEnds = wp.find1Cpixels(self.skeletonizedWormImage)
     skeletonEndPts = cv2.findNonZero(np.uint8(skeletonEnds))
     if skeletonEndPts is None:
         skeletonEndPts = []
     nEndPts = len(skeletonEndPts)
      if nEndPts < 2:  # skeleton is a circle (Omega turn)
         self.badSkeletonization = True
         self.crossedWorm = True
     elif nEndPts > 2:  # skeleton has spurs
         self.badSkeletonization = True
     else:
         skeletonInverted = np.logical_not(self.skeletonizedWormImage)
         skeletonPts, cost = \
             graph.route_through_array(np.uint8(skeletonInverted),
                                       np.flipud(skeletonEndPts[0][0]),
                                       np.flipud(skeletonEndPts[1][0]),
                                       geometric=True)
         self.skeleton = np.array([[pt[0], pt[1]] for pt in skeletonPts])
         self.badSkeletonization = False
Example #46
    def ExtractPatchesRandomSampling(self, imgBGR, imgDepth, imgMask, fgMask, W, fgFlag, NumPatches):
        M,N,_ = imgBGR.shape
        halfW = int(W/2)
        colorPatches = []
        depthPatches = []

        if fgFlag == True:
            # extract patches for foreground
            mask = fgMask
        else:
            # extract patches for background
            mask = 255 - fgMask
            mask = cv2.bitwise_and(mask, mask, mask=imgMask)


        # random sampling init
        submask = np.zeros(fgMask.shape, np.uint8)
        cv2.rectangle(submask,(halfW+1,halfW+1),(N-halfW-1,M-halfW-1),255,-1)
        submask = cv2.bitwise_and(mask, mask, mask=submask)
        #cv2.imshow('mask', mask)
        #cv2.imshow('submask', submask)
        #WaitKey(0)

        pixelpoints = cv2.findNonZero(submask)
        index = np.random.choice(len(pixelpoints), NumPatches)

        # random sampling
        for i in range(len(index)):
            x = pixelpoints[index[i]][0][0]
            y = pixelpoints[index[i]][0][1]
            colorPatch = imgBGR[y-halfW: y+halfW, x-halfW: x+halfW, :]
            assert (colorPatch.shape == (W,W,3))
            colorPatches.append(colorPatch)
            depthPatch = imgDepth[y-halfW: y+halfW, x-halfW: x+halfW]
            assert (depthPatch.shape == (W,W))
            depthPatches.append(depthPatch)

        return colorPatches, depthPatches
Example #47
def color_range_to_transparent(image, min_hsv, max_hsv):
    """Returns image where HSV color range is converted to transparent.

    image: OpenCV format image
    min_hsv: Minimum HSV value as np.array
    max_hsv: Maximum HSV value as np.array
    """
    bw_image = find_color(image, min_hsv, max_hsv)

    if DEBUG:
        cv2.imwrite('debug.jpg', bw_image)

    # Find the matching pixels
    non_zero_pixels = cv2.findNonZero(bw_image)

    # Add alpha channel to new image
    new_image = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2BGRA)

    for pixel in non_zero_pixels:
        x, y = pixel[0][1], pixel[0][0]
        new_image[x][y] = np.array([0, 0, 0, 0], np.uint8)

    cv2.imwrite('new.png', new_image)
Example #48
def getTransformationMatrix(img):
	#input should be a binarized image - text white, bg black
	
	#Find all white pixels
	pts = cv2.findNonZero(img)

	#Get rotated rect of white pixels
	rect = cv2.minAreaRect(pts)
	
	# rect[0] has the center of rectangle, rect[1] has width and height, rect[2] has the angle
	# To draw the rotated box and save the png image, uncomment below
	drawrect = img.copy()
	drawrect = cv2.cvtColor(drawrect, cv2.COLOR_GRAY2BGR)
	box = cv2.cv.BoxPoints(rect)
	box = np.int0(box) # box now has four vertices of rotated rectangle
	cv2.drawContours(drawrect,[box],0,(0,0,255),10)
	cv2.imwrite('rotated_rect.png', drawrect)

	#Change rotation angle if the tilt is in another direction
	rect = list(rect)
	if (rect[1][0] < rect[1][1]): # rect width < rect height: swap them and adjust the angle
		temp = list(rect[1])
		temp[0], temp[1] = temp[1], temp[0]
		rect[1] = tuple(temp)
		rect[2] = rect[2] + 90.0

	#convert rect back to numpy/tuple
	rect = np.asarray(rect)
	
	#Rotate the image according to the found angle
	M = cv2.getRotationMatrix2D(rect[0], rect[2], 1.0)
	#img = cv2.warpAffine(img, M, (img.shape[1],img.shape[0]))

	#returns the transformation matrix for this rotation
	return M
Example #49
    def detect(self,frame,user_roi,visualize=False):
        u_r = user_roi
        if self.window_should_open:
            self.open_window((frame.img.shape[1],frame.img.shape[0]))
        if self.window_should_close:
            self.close_window()

        if self._window:
            debug_img = np.zeros(frame.img.shape,frame.img.dtype)


        #get the user_roi
        img = frame.img
        r_img = img[u_r.view]
        #        bias_field = preproc.EstimateBias(r_img)
        # r_img = preproc.Unbias(r_img, bias_field)
        r_img = preproc.GaussBlur(r_img)
        r_img = preproc.RobustRescale(r_img)
        frame.img[u_r.view] = r_img
        gray_img = cv2.cvtColor(r_img,cv2.COLOR_BGR2GRAY)


        # coarse pupil detection

        if self.coarse_detection.value:
            integral = cv2.integral(gray_img)
            integral =  np.array(integral,dtype=c_float)
            x,y,w,response = eye_filter(integral,self.coarse_filter_min,self.coarse_filter_max)
            p_r = Roi(gray_img.shape)
            if w>0:
                p_r.set((y,x,y+w,x+w))
            else:
                p_r.set((0,0,-1,-1))
        else:
            p_r = Roi(gray_img.shape)
            p_r.set((0,0,None,None))
            w = img.shape[0]/2

        coarse_pupil_width = w/2.
        padding = coarse_pupil_width/4.
        pupil_img = gray_img[p_r.view]



        # binary thresholding of pupil dark areas
        hist = cv2.calcHist([pupil_img],[0],None,[256],[0,256]) #(images, channels, mask, histSize, ranges[, hist[, accumulate]])
        bins = np.arange(hist.shape[0])
        spikes = bins[hist[:,0]>40] # every intensity seen in more than 40 pixels
        if spikes.shape[0] >0:
            lowest_spike = spikes.min()
            highest_spike = spikes.max()
        else:
            lowest_spike = 200
            highest_spike = 255

        offset = self.intensity_range.value
        spectral_offset = 5
        if visualize:
            # display the histogram
            sx,sy = 100,1
            colors = ((0,0,255),(255,0,0),(255,255,0),(255,255,255))
            h,w,chan = img.shape
            hist *= 1./hist.max()  # normalize for display

            for i,h in zip(bins,hist[:,0]):
                c = colors[1]
                cv2.line(img,(w,int(i*sy)),(w-int(h*sx),int(i*sy)),c)
            cv2.line(img,(w,int(lowest_spike*sy)),(int(w-.5*sx),int(lowest_spike*sy)),colors[0])
            cv2.line(img,(w,int((lowest_spike+offset)*sy)),(int(w-.5*sx),int((lowest_spike+offset)*sy)),colors[2])
            cv2.line(img,(w,int((highest_spike)*sy)),(int(w-.5*sx),int((highest_spike)*sy)),colors[0])
            cv2.line(img,(w,int((highest_spike- spectral_offset )*sy)),(int(w-.5*sx),int((highest_spike - spectral_offset)*sy)),colors[3])

        # create dark and spectral glint masks
        self.bin_thresh.value = lowest_spike
        binary_img = bin_thresholding(pupil_img,image_upper=lowest_spike + offset)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7,7))
        cv2.dilate(binary_img, kernel,binary_img, iterations=2)
        spec_mask = bin_thresholding(pupil_img, image_upper=highest_spike - spectral_offset)
        cv2.erode(spec_mask, kernel,spec_mask, iterations=1)

        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9,9))

        #open operation to remove eye lashes
        pupil_img = cv2.morphologyEx(pupil_img, cv2.MORPH_OPEN, kernel)

        if self.blur > 1:
            pupil_img = cv2.medianBlur(pupil_img,self.blur.value)

        edges = cv2.Canny(pupil_img,
                            self.canny_thresh,
                            self.canny_thresh*self.canny_ratio,
                            apertureSize= self.canny_aperture)


        # remove edges in areas not dark enough and where the glint is (spectral reflection from IR LEDs)
        edges = cv2.min(edges, spec_mask)
        edges = cv2.min(edges,binary_img)

        overlay =  img[u_r.view][p_r.view]
        if visualize:
            b,g,r = overlay[:,:,0],overlay[:,:,1],overlay[:,:,2]
            g[:] = cv2.max(g,edges)
            b[:] = cv2.max(b,binary_img)
            b[:] = cv2.min(b,spec_mask)

            # draw a frame around the automatic pupil ROI in overlay.
            overlay[::2,0] = 255 #yeay numpy broadcasting
            overlay[::2,-1]= 255
            overlay[0,::2] = 255
            overlay[-1,::2]= 255
            # draw a frame around the area we require the pupil center to be.
            overlay[padding:-padding:4,padding] = 255
            overlay[padding:-padding:4,-padding]= 255
            overlay[padding,padding:-padding:4] = 255
            overlay[-padding,padding:-padding:4]= 255

        if visualize:
            c = (100.,frame.img.shape[0]-100.)
            e_max = ((c),(self.pupil_max.value,self.pupil_max.value),0)
            e_recent = ((c),(self.target_size.value,self.target_size.value),0)
            e_min = ((c),(self.pupil_min.value,self.pupil_min.value),0)
            cv2.ellipse(frame.img,e_min,(0,0,255),1)
            cv2.ellipse(frame.img,e_recent,(0,255,0),1)
            cv2.ellipse(frame.img,e_max,(0,0,255),1)

        #get raw edge pix for later
        raw_edges = cv2.findNonZero(edges)

        def ellipse_true_support(e,raw_edges):
            a,b = e[1][0]/2.,e[1][1]/2. # major minor radii of candidate ellipse
            ellipse_circumference = np.pi*abs(3*(a+b)-np.sqrt(10*a*b+3*(a**2+b**2)))
            distances = dist_pts_ellipse(e,raw_edges)
            support_pixels = raw_edges[distances<=1.3]
            # support_ratio = support_pixel.shape[0]/ellipse_circumference
            return support_pixels,ellipse_circumference

        # if we had a good ellipse before ,let see if it is still a good first guess:
        if self.strong_prior:
            e = p_r.sub_vector(u_r.sub_vector(self.strong_prior[0])),self.strong_prior[1],self.strong_prior[2]

            self.strong_prior = None
            if raw_edges is not None:
                support_pixels,ellipse_circumference = ellipse_true_support(e,raw_edges)
                support_ratio =  support_pixels.shape[0]/ellipse_circumference
                if support_ratio >= self.strong_perimeter_ratio_range[0]:
                    refit_e = cv2.fitEllipse(support_pixels)
                    if self._window:
                        cv2.ellipse(debug_img,e,(255,100,100),thickness=4)
                        cv2.ellipse(debug_img,refit_e,(0,0,255),thickness=1)
                    e = refit_e
                    self.strong_prior = u_r.add_vector(p_r.add_vector(e[0])),e[1],e[2]
                    goodness = min(1.,support_ratio)
                    pupil_ellipse = {}
                    pupil_ellipse['confidence'] = goodness
                    pupil_ellipse['ellipse'] = e
                    pupil_ellipse['roi_center'] = e[0]
                    pupil_ellipse['major'] = max(e[1])
                    pupil_ellipse['minor'] = min(e[1])
                    pupil_ellipse['apparent_pupil_size'] = max(e[1])
                    pupil_ellipse['axes'] = e[1]
                    pupil_ellipse['angle'] = e[2]
                    e_img_center =u_r.add_vector(p_r.add_vector(e[0]))
                    norm_center = normalize(e_img_center,(frame.img.shape[1], frame.img.shape[0]),flip_y=True)
                    pupil_ellipse['norm_pupil'] = norm_center
                    pupil_ellipse['center'] = e_img_center
                    pupil_ellipse['timestamp'] = frame.timestamp

                    self.target_size.value = max(e[1])

                    self.confidence.value = goodness
                    self.confidence_hist.append(goodness)
                    self.confidence_hist[:-200]=[]
                    if self._window:
                        #draw a little animation of confidence
                        cv2.putText(debug_img, 'good',(410,debug_img.shape[0]-100), cv2.FONT_HERSHEY_SIMPLEX,0.3,(255,100,100))
                        cv2.putText(debug_img, 'threshold',(410,debug_img.shape[0]-int(self.final_perimeter_ratio_range[0]*100)), cv2.FONT_HERSHEY_SIMPLEX,0.3,(255,100,100))
                        cv2.putText(debug_img, 'no detection',(410,debug_img.shape[0]-10), cv2.FONT_HERSHEY_SIMPLEX,0.3,(255,100,100))
                        lines = np.array([[[2*x,debug_img.shape[0]-int(100*y)],[2*x,debug_img.shape[0]]] for x,y in enumerate(self.confidence_hist)])
                        cv2.polylines(debug_img,lines,isClosed=False,color=(255,100,100))
                        self.gl_display_in_window(debug_img)
                    return pupil_ellipse





        # from edges to contours
        contours, hierarchy = cv2.findContours(edges,
                                            mode=cv2.RETR_LIST,
                                            method=cv2.CHAIN_APPROX_NONE,offset=(0,0)) #TC89_KCOS
        # contours is a list containing array([[[108, 290]],[[111, 290]]], dtype=int32) shape=(number of points,1,dimension(2) )

        ### first we want to filter out the bad stuff
        # too short
        good_contours = [c for c in contours if c.shape[0]>self.min_contour_size.value]
        # now we learn things about each contour by looking at the curvature.
        # For this we need to simplify the contour so that pt-to-pt angles become more meaningful
        aprox_contours = [cv2.approxPolyDP(c,epsilon=1.5,closed=False) for c in good_contours]

        if self._window:
            x_shift = coarse_pupil_width*2
            color = zip(range(0,250,15),range(0,255,15)[::-1],range(230,250))
        split_contours = []
        for c in aprox_contours:
            curvature = GetAnglesPolyline(c)
            # we split whenever there is a real kink (abs(curvature)<right angle) or a change in the general direction
            kink_idx = find_kink_and_dir_change(curvature,80)
            segs = split_at_corner_index(c,kink_idx)

            #TODO: split at sharp inward turns
            for s in segs:
                if s.shape[0]>2:
                    split_contours.append(s)
                    if self._window:
                        c = color.pop(0)
                        color.append(c)
                        s = s.copy()
                        s[:,:,0] += debug_img.shape[1]-coarse_pupil_width*2
                        # s[:,:,0] += x_shift
                        # x_shift += 5
                        cv2.polylines(debug_img,[s],isClosed=False,color=map(lambda x: x,c),thickness = 1,lineType=4)#cv2.CV_AA

        split_contours.sort(key=lambda x:-x.shape[0])
        # print [x.shape[0]for x in split_contours]
        if len(split_contours) == 0:
            # not a single useful segment found -> no pupil found
            self.confidence.value = 0
            self.confidence_hist.append(0)
            if self._window:
                self.gl_display_in_window(debug_img)
            return {'timestamp':frame.timestamp,'norm_pupil':None}


        # removing stubs makes combinatorial search feasible
        split_contours = [c for c in split_contours if c.shape[0]>3]

        def ellipse_filter(e):
            in_center = padding < e[0][1] < pupil_img.shape[0]-padding and padding < e[0][0] < pupil_img.shape[1]-padding
            if in_center:
                is_round = min(e[1])/max(e[1]) >= self.min_ratio
                if is_round:
                    right_size = self.pupil_min.value <= max(e[1]) <= self.pupil_max.value
                    if right_size:
                        return True
            return False

        def ellipse_on_blue(e):
            center_on_dark = binary_img[e[0][1],e[0][0]]
            return bool(center_on_dark)

        def ellipse_support_ratio(e,contours):
            a,b = e[1][0]/2.,e[1][1]/2. # major minor radii of candidate ellipse
            ellipse_area =  np.pi*a*b
            ellipse_circumference = np.pi*abs(3*(a+b)-np.sqrt(10*a*b+3*(a**2+b**2)))
            actual_area = cv2.contourArea(cv2.convexHull(np.concatenate(contours)))
            actual_contour_length = sum([cv2.arcLength(c,closed=False) for c in contours])
            area_ratio = actual_area / ellipse_area
            perimeter_ratio = actual_contour_length / ellipse_circumference #we assume here that the contour lies close to the ellipse boundary
            return perimeter_ratio,area_ratio


        def final_fitting(c,edges):
            #use the real edge pixels to fit, not the approximated contours
            support_mask = np.zeros(edges.shape,edges.dtype)
            cv2.polylines(support_mask,c,isClosed=False,color=(255,255,255),thickness=2)
            # draw into the support mask with thickness 2
            new_edges = cv2.min(edges, support_mask)
            new_contours = cv2.findNonZero(new_edges)
            if self._window:
                new_edges[new_edges!=0] = 255
                overlay[:,:,1] = cv2.max(overlay[:,:,1], new_edges)
                overlay[:,:,2] = cv2.max(overlay[:,:,2], new_edges)
            new_e = cv2.fitEllipse(new_contours)
            return new_e,new_contours


        # finding potential candidates for ellipse seeds that describe the pupil.
        strong_seed_contours = []
        weak_seed_contours = []
        for idx, c in enumerate(split_contours):
            if c.shape[0] >=5:
                e = cv2.fitEllipse(c)
                # is this ellipse a plausible candidate for a pupil?
                if ellipse_filter(e):
                    distances = dist_pts_ellipse(e,c)
                    fit_variance = np.sum(distances**2)/float(distances.shape[0])
                    if fit_variance <= self.inital_ellipse_fit_threshhold:
                        # how much ellipse is supported by this contour?
                        perimeter_ratio,area_ratio = ellipse_support_ratio(e,[c])
                        # logger.debug('Ellipse no %s with perimeter_ratio: %s , area_ratio: %s'%(idx,perimeter_ratio,area_ratio))
                        if self.strong_perimeter_ratio_range[0]<= perimeter_ratio <= self.strong_perimeter_ratio_range[1] and self.strong_area_ratio_range[0]<= area_ratio <= self.strong_area_ratio_range[1]:
                            strong_seed_contours.append(idx)
                            if self._window:
                                cv2.polylines(debug_img,[c],isClosed=False,color=(255,100,100),thickness=4)
                                e = (e[0][0]+debug_img.shape[1]-coarse_pupil_width*4,e[0][1]),e[1],e[2]
                                cv2.ellipse(debug_img,e,color=(255,100,100),thickness=3)
                        else:
                            weak_seed_contours.append(idx)
                            if self._window:
                                cv2.polylines(debug_img,[c],isClosed=False,color=(255,0,0),thickness=2)
                                e = (e[0][0]+debug_img.shape[1]-coarse_pupil_width*4,e[0][1]),e[1],e[2]
                                cv2.ellipse(debug_img,e,color=(255,0,0))

        sc = np.array(split_contours)


        if strong_seed_contours:
            seed_idx = strong_seed_contours
        elif weak_seed_contours:
            seed_idx = weak_seed_contours
        else:
            # no seed contour at all -> no pupil found
            if self._window:
                self.gl_display_in_window(debug_img)
            self.confidence.value = 0
            self.confidence_hist.append(0)
            return {'timestamp':frame.timestamp,'norm_pupil':None}

        # if self._window:
        #     cv2.polylines(debug_img,[split_contours[i] for i in seed_idx],isClosed=False,color=(255,255,100),thickness=3)

        def ellipse_eval(contours):
            c = np.concatenate(contours)
            e = cv2.fitEllipse(c)
            d = dist_pts_ellipse(e,c)
            fit_variance = np.sum(d**2)/float(d.shape[0])
            return fit_variance <= self.inital_ellipse_fit_threshhold


        solutions = pruning_quick_combine(split_contours,ellipse_eval,seed_idx,max_evals=1000,max_depth=5)
        solutions = filter_subsets(solutions)
        ratings = []


        for s in solutions:
            e = cv2.fitEllipse(np.concatenate(sc[s]))
            if self._window:
                cv2.ellipse(debug_img,e,(0,150,100))
            support_pixels,ellipse_circumference = ellipse_true_support(e,raw_edges)
            support_ratio =  support_pixels.shape[0]/ellipse_circumference
            # TODO: refine the selection of the final candidate
            if support_ratio >=self.final_perimeter_ratio_range[0] and ellipse_filter(e):
                ratings.append(support_pixels.shape[0])
                if support_ratio >=self.strong_perimeter_ratio_range[0]:
                    self.strong_prior = u_r.add_vector(p_r.add_vector(e[0])),e[1],e[2]
                    if self._window:
                        cv2.ellipse(debug_img,e,(0,255,255),thickness = 2)
            else:
                #not a valid solution, bad rating
                ratings.append(-1)


        # selected ellipse
        if max(ratings) == -1:
            #no good final ellipse found
            if self._window:
                self.gl_display_in_window(debug_img)
            self.confidence.value = 0
            self.confidence_hist.append(0)
            return {'timestamp':frame.timestamp,'norm_pupil':None}

        best = solutions[ratings.index(max(ratings))]
        e = cv2.fitEllipse(np.concatenate(sc[best]))

        #final calculation of goodness of fit
        support_pixels,ellipse_circumference = ellipse_true_support(e,raw_edges)
        support_ratio =  support_pixels.shape[0]/ellipse_circumference
        goodness = min(1.,support_ratio)

        #final fitting and return of result
        new_e,final_edges = final_fitting(sc[best],edges)
        size_dif = abs(1 - max(e[1])/max(new_e[1]))
        if ellipse_filter(new_e) and size_dif < .3:
            if self._window:
                cv2.ellipse(debug_img,new_e,(0,255,0))
            e = new_e


        pupil_ellipse = {}
        pupil_ellipse['confidence'] = goodness
        pupil_ellipse['ellipse'] = e
        pupil_ellipse['pos_in_roi'] = e[0]
        pupil_ellipse['major'] = max(e[1])
        pupil_ellipse['apparent_pupil_size'] = max(e[1])
        pupil_ellipse['minor'] = min(e[1])
        pupil_ellipse['axes'] = e[1]
        pupil_ellipse['angle'] = e[2]
        e_img_center =u_r.add_vector(p_r.add_vector(e[0]))
        norm_center = normalize(e_img_center,(frame.img.shape[1], frame.img.shape[0]),flip_y=True)
        pupil_ellipse['norm_pupil'] = norm_center
        pupil_ellipse['center'] = e_img_center
        pupil_ellipse['timestamp'] = frame.timestamp

        self.target_size.value = max(e[1])

        self.confidence.value = goodness
        self.confidence_hist.append(goodness)
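        # keep only the most recent 200 confidence samples for the debug plot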
        self.confidence_hist[:-200]=[]
        if self._window:
            #draw a little animation of confidence
            cv2.putText(debug_img, 'good',(410,debug_img.shape[0]-100), cv2.FONT_HERSHEY_SIMPLEX,0.3,(255,100,100))
            cv2.putText(debug_img, 'threshold',(410,debug_img.shape[0]-int(self.final_perimeter_ratio_range[0]*100)), cv2.FONT_HERSHEY_SIMPLEX,0.3,(255,100,100))
            cv2.putText(debug_img, 'no detection',(410,debug_img.shape[0]-10), cv2.FONT_HERSHEY_SIMPLEX,0.3,(255,100,100))
            lines = np.array([[[2*x,debug_img.shape[0]-int(100*y)],[2*x,debug_img.shape[0]]] for x,y in enumerate(self.confidence_hist)])
            cv2.polylines(debug_img,lines,isClosed=False,color=(255,100,100))
            self.gl_display_in_window(debug_img)
        return pupil_ellipse
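
The helper ellipse_true_support used above is not shown in this example. A minimal sketch of the same idea, written here for illustration (all names are assumed, not the author's implementation): rasterize the candidate ellipse into a mask, count the raw edge pixels that fall on it, and compare that count against the approximated circumference.

import numpy as np
import cv2

def ellipse_support_sketch(e, raw_edges):
    # e is an ellipse as returned by cv2.fitEllipse: ((cx,cy),(w,h),angle)
    mask = np.zeros(raw_edges.shape, np.uint8)
    center = (int(round(e[0][0])), int(round(e[0][1])))
    axes = (int(round(e[1][0]/2.)), int(round(e[1][1]/2.)))
    cv2.ellipse(mask, center, axes, e[2], 0, 360, 255, 2)
    support_pixels = cv2.findNonZero(cv2.min(raw_edges, mask))
    a, b = e[1][0]/2., e[1][1]/2.
    # Ramanujan's approximation, as in ellipse_support_ratio above
    circumference = np.pi*abs(3*(a+b)-np.sqrt(10*a*b+3*(a**2+b**2)))
    n = 0 if support_pixels is None else support_pixels.shape[0]
    return n, circumference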
Esempio n. 50
def get_score(image):
    """Finds score data from given RGB image which is Image object"""
    data = {}

    # Create new bigger canvas where the table image can be rotated.
    # This is needed because otherwise the table might be rotated outside
    # of image bounds.
    new_size = (image.size[0] * 2, image.size[1] * 2)
    big = Image.new('RGB', new_size)

    # Place the actual image in the middle of the new empty canvas
    # The position was tested pretty much empirically
    big.paste(image, (int(image.size[0] / 1.5), int(image.size[1] / 2)))

    array = np.array(big)
    # Convert RGB to BGR, because OpenCV uses BGR
    cv_image = array[:, :, ::-1].copy()

    if DEBUG:
        cv2.imwrite('debug/large.jpg', cv_image)

    logging.debug('Straightening table..')
    rotated_image = straighten_table(cv_image)

    if DEBUG:
        cv2.imwrite('debug/large_straight.jpg', rotated_image)

    # Find table corners
    logging.debug('Finding table corners..')
    bw_image = find_blue(rotated_image)

    if DEBUG:
        cv2.imwrite('debug/found_blue_large.jpg', bw_image)

    non_zero_pixels = cv2.findNonZero(bw_image)
    rect = cv2.minAreaRect(non_zero_pixels)
    precise_corners = cv2.cv.BoxPoints(rect)
    corners = np.int0(np.around(precise_corners))

    sorted_corners = [(x, y) for x, y in corners]
    tl, br = find_crop_corners(sorted_corners)
    sorted_corners.remove(tl)
    sorted_corners.remove(br)
    bl, tr = min(sorted_corners), max(sorted_corners)

    if DEBUG:
        label_tl_im = draw_label(rotated_image, tl, 'A')
        label_tl_im = draw_points(label_tl_im, [tl])
        cv2.imwrite('debug/corner_a.jpg', label_tl_im)

        label_bl_im = draw_label(rotated_image, bl, 'B')
        label_bl_im = draw_points(label_bl_im, [bl])
        cv2.imwrite('debug/corner_b.jpg', label_bl_im)

        label_br_im = draw_label(rotated_image, br, 'C')
        label_br_im = draw_points(label_br_im, [br])
        cv2.imwrite('debug/corner_c.jpg', label_br_im)

        label_tr_im = draw_label(rotated_image, tr, 'D')
        label_tr_im = draw_points(label_tr_im, [tr])
        cv2.imwrite('debug/corner_d.jpg', label_tr_im)

        labels = draw_label(rotated_image, tl, 'A')
        labels = draw_label(labels, bl, 'B')
        labels = draw_label(labels, br, 'C')
        labels = draw_label(labels, tr, 'D')
        labels = draw_points(labels, [tl, bl, br, tr])
        cv2.imwrite('debug/corner_labels.jpg', labels)

    # Find bounding boxes for scores
    logging.debug('Finding and cropping score blocks..')
    score_boxes = find_score_boxes([tl, bl, br, tr], rotated_image)
    if DEBUG:
        points = []
        for box in score_boxes:
            points += box

        # Add table corners
        points += [(x, y) for x, y in corners]

        im = draw_points(rotated_image, points)
        cv2.imwrite('debug/debug.jpg', im)

    score1_crop, score2_crop = crop_boxes(rotated_image, score_boxes)

    if DEBUG:
        cv2.imwrite('debug/left_score_blocks.jpg', score1_crop)
        cv2.imwrite('debug/right_score_blocks.jpg', score2_crop)

    logging.debug('Counting left score..')
    bw_image = find_orange(score1_crop)

    if DEBUG:
        cv2.imwrite('debug/left_score_blocks_black_white.jpg', bw_image)

    objects = find_object_centers(bw_image)
    data['leftScore'] = 10 - find_score(objects)

    if DEBUG:
        centers_im = draw_points(score1_crop, objects, radius=2)
        cv2.imwrite('debug/centers_left.jpg', centers_im)

        create_text_image('debug/left_score.jpg', 'Left: %s' % data['leftScore'])

    logging.debug('Counting right score..')
    image = Image.fromarray(score2_crop).convert('L')
    image = np.array(image, dtype=int)

    # Threshold
    T = 160
    bw_image = image > T
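    # (roughly equivalent, on a uint8 image, to cv2.threshold(image, T, 255, cv2.THRESH_BINARY))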
    if DEBUG:
        scipy.misc.imsave('debug/right_score_blocks_black_white.jpg', bw_image)

    objects = find_object_centers(bw_image)
    data['rightScore'] = find_score(objects)

    if DEBUG:
        centers_im = draw_points(score2_crop, objects, radius=2)
        cv2.imwrite('debug/centers_right.jpg', centers_im)

        create_text_image('debug/right_score.jpg', 'Right: %s' % data['rightScore'])

    return data
Esempio n. 51
    def detect(self,frame,u_roi,visualize=False):

        if self.window_should_open:
            self.open_window()
        if self.window_should_close:
            self.close_window()



        #get the user_roi
        img = frame.img
        r_img = img[u_roi.lY:u_roi.uY,u_roi.lX:u_roi.uX]
        gray_img = grayscale(r_img)


        # coarse pupil detection
        integral = cv2.integral(gray_img)
        integral =  np.array(integral,dtype=c_float)
        x,y,w,response = eye_filter(integral,self.coarse_filter_min,self.coarse_filter_max)
        p_roi = Roi(gray_img.shape)
        if w>0:
            p_roi.set((y,x,y+w,x+w))
        else:
            p_roi.set((0,0,-1,-1))
        coarse_pupil_center = x+w/2.,y+w/2.
        coarse_pupil_width = w/2.
        padding = coarse_pupil_width/4.
        pupil_img = gray_img[p_roi.lY:p_roi.uY,p_roi.lX:p_roi.uX]



        # binary thresholding of pupil dark areas
        hist = cv2.calcHist([pupil_img],[0],None,[256],[0,256]) #(images, channels, mask, histSize, ranges[, hist[, accumulate]])
        bins = np.arange(hist.shape[0])
        spikes = bins[hist[:,0]>40] # every intensity seen in more than 40 pixels
        if spikes.shape[0] >0:
            lowest_spike = spikes.min()
            highest_spike = spikes.max()
        else:
            lowest_spike = 200
            highest_spike = 255

        offset = self.intensity_range.value
        spectral_offset = 5
        if visualize:
            # display the histogram
            sx,sy = 100,1
            colors = ((0,0,255),(255,0,0),(255,255,0),(255,255,255))
            h,w,chan = img.shape
            hist *= 1./hist.max()  # normalize for display

            for i,h in zip(bins,hist[:,0]):
                c = colors[1]
                cv2.line(img,(w,int(i*sy)),(w-int(h*sx),int(i*sy)),c)
            cv2.line(img,(w,int(lowest_spike*sy)),(int(w-.5*sx),int(lowest_spike*sy)),colors[0])
            cv2.line(img,(w,int((lowest_spike+offset)*sy)),(int(w-.5*sx),int((lowest_spike+offset)*sy)),colors[2])
            cv2.line(img,(w,int((highest_spike)*sy)),(int(w-.5*sx),int((highest_spike)*sy)),colors[0])
            cv2.line(img,(w,int((highest_spike- spectral_offset )*sy)),(int(w-.5*sx),int((highest_spike - spectral_offset)*sy)),colors[3])

        # create dark and spectral glint masks
        self.bin_thresh.value = lowest_spike
        binary_img = bin_thresholding(pupil_img,image_upper=lowest_spike + offset)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7,7))
        cv2.dilate(binary_img, kernel,binary_img, iterations=2)
        spec_mask = bin_thresholding(pupil_img, image_upper=highest_spike - spectral_offset)
        cv2.erode(spec_mask, kernel,spec_mask, iterations=1)

        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9,9))

        #open operation to remove eye lashes
        pupil_img = cv2.morphologyEx(pupil_img, cv2.MORPH_OPEN, kernel)

        if self.blur.value >1:
            pupil_img = cv2.medianBlur(pupil_img,self.blur.value)

        edges = cv2.Canny(pupil_img,
                            self.canny_thresh.value,
                            self.canny_thresh.value*self.canny_ratio.value,
                            apertureSize= self.canny_aperture.value)


        # edges = cv2.adaptiveThreshold(pupil_img,255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, self.canny_aperture.value, 7)

        # remove edges in areas not dark enough and where the glint is (spectral reflection from the IR LEDs)
        edges = cv2.min(edges, spec_mask)
        edges = cv2.min(edges,binary_img)

        if visualize:
            overlay =  img[u_roi.lY:u_roi.uY,u_roi.lX:u_roi.uX][p_roi.lY:p_roi.uY,p_roi.lX:p_roi.uX]
            chn_img = grayscale(overlay)
            overlay[:,:,2] = cv2.max(chn_img,edges) #red channel
            overlay[:,:,0] = cv2.max(chn_img,binary_img) #blue channel
            overlay[:,:,1] = cv2.min(chn_img,spec_mask) #green channel

            pupil_img = frame.img[u_roi.lY:u_roi.uY,u_roi.lX:u_roi.uX][p_roi.lY:p_roi.uY,p_roi.lX:p_roi.uX]
            # draw a frame around the automatic pupil ROI in overlay...
            pupil_img[::2,0] = 255,255,255
            pupil_img[::2,-1]= 255,255,255
            pupil_img[0,::2] = 255,255,255
            pupil_img[-1,::2]= 255,255,255

            p = int(padding) # padding is a float; numpy indices must be integers
            pupil_img[::2,p] = 255,255,255
            pupil_img[::2,-p]= 255,255,255
            pupil_img[p,::2] = 255,255,255
            pupil_img[-p,::2]= 255,255,255

            frame.img[u_roi.lY:u_roi.uY,u_roi.lX:u_roi.uX][p_roi.lY:p_roi.uY,p_roi.lX:p_roi.uX] = pupil_img


        # from edges to contours
        contours, hierarchy = cv2.findContours(edges,
                                            mode=cv2.RETR_LIST,
                                            method=cv2.CHAIN_APPROX_NONE,offset=(0,0)) #TC89_KCOS
        # contours is a list containing array([[[108, 290]],[[111, 290]]], dtype=int32) shape=(number of points,1,dimension(2) )


        ### first we want to filter out the bad stuff
        # too short
        good_contours = [c for c in contours if c.shape[0]>self.min_contour_size]
        # now we learn things about each contour by looking at its curvature. For this we need to simplify the contour
        approx_contours = [cv2.approxPolyDP(c,epsilon=1.5,closed=False) for c in good_contours]
        # cv2.drawContours(pupil_img,good_contours,-1,(255,255,0))
        # cv2.drawContours(pupil_img,approx_contours,-1,(0,0,255))

        if self._window:
            debug_img = np.zeros(img.shape,img.dtype)

        x_shift = coarse_pupil_width*2 #just for display
        color = zip(range(0,250,30),range(0,255,30)[::-1],range(230,250))
        split_contours = []
        for c in approx_contours:
            curvature = GetAnglesPolyline(c)
            # print curvature
            # we split whenever there is a real kink (abs(curvature)<right angle) or a change in the general direction
            kink_idx = find_kink_and_dir_change(curvature,100)
            # kinks,k_index = convexity_defect(c,curvature)
            # print "kink_idx", kink_idx
            segs = split_at_corner_index(c,kink_idx)
            # print len(segs)
            # segs.sort(key=lambda e:-len(e))
            for s in segs:
                split_contours.append(s)
                if self._window:
                    c = color.pop(0)
                    color.append(c)
                    # if s.shape[0] >=5:
                    #     cv2.polylines(debug_img,[s],isClosed=False,color=c)
                    s = s.copy()
                    s[:,:,1] +=  coarse_pupil_width*2
                    cv2.polylines(debug_img,[s],isClosed=False,color=c)
                    s[:,:,0] += x_shift
                    x_shift += 5
                    cv2.polylines(debug_img,[s],isClosed=False,color=c)
        # return {'timestamp':frame.timestamp,'norm_pupil':None}

        #these segments may now be smaller, we need to get rid of those not long enough for ellipse fitting
        good_contours = [c for c in split_contours if c.shape[0]>=5]
        # cv2.polylines(img,good_contours,isClosed=False,color=(255,255,0))

        shape = edges.shape
        ellipses = ((cv2.fitEllipse(c),c) for c in good_contours)
        ellipses = ((e,c) for e,c in ellipses if (padding < e[0][1] < shape[0]-padding and padding< e[0][0] < shape[1]-padding)) # center is close to roi center
        ellipses = ((e,c) for e,c in ellipses if binary_img[int(e[0][1]),int(e[0][0])]) # center is on a dark pixel
        ellipses = [(e,c) for e,c in ellipses if is_round(e,self.target_ratio)] # roundness test
        result = []
        for e,c in ellipses:
            size_dif = size_deviation(e,self.target_size.value)
            pupil_ellipse = {}
            pupil_ellipse['contour'] = c
            a,b = e[1][0]/2.,e[1][1]/2. # major and minor radii of the candidate ellipse
            pupil_ellipse['circumference'] = np.pi*abs(3*(a+b)-np.sqrt(10*a*b+3*(a**2+b**2)))
            # pupil_ellipse['convex_hull'] = cv2.convexHull(pupil_ellipse['contour'])
            pupil_ellipse['contour_area'] = cv2.contourArea(cv2.convexHull(c))
            pupil_ellipse['ellipse_area'] = np.pi*a*b
            # print abs(pupil_ellipse['contour_area']-pupil_ellipse['ellipse_area'])
            if abs(pupil_ellipse['contour_area']-pupil_ellipse['ellipse_area']) <10:
                pupil_ellipse['goodness'] = abs(pupil_ellipse['contour_area']-pupil_ellipse['ellipse_area'])/10 #perfect match we'll take this one
            else:
                pupil_ellipse['goodness'] = size_dif
            if visualize:
                    pass
                    # cv2.drawContours(pupil_img,[cv2.convexHull(c)],-1,(size_dif,size_dif,255))
                    # cv2.drawContours(pupil_img,[c],-1,(size_dif,size_dif,255))
            pupil_ellipse['pupil_center'] = e[0] # center in pupil-ROI coordinates
            pupil_ellipse['center'] = u_roi.add_vector(p_roi.add_vector(e[0])) # compensate for roi offsets
            pupil_ellipse['angle'] = e[-1]
            pupil_ellipse['axes'] = e[1]
            pupil_ellipse['major'] = max(e[1])
            pupil_ellipse['minor'] = min(e[1])
            pupil_ellipse['ratio'] = pupil_ellipse['minor']/pupil_ellipse['major']
            pupil_ellipse['norm_pupil'] = normalize(pupil_ellipse['center'], (img.shape[1], img.shape[0]),flip_y=True )
            pupil_ellipse['timestamp'] = frame.timestamp
            result.append(pupil_ellipse)


        #### adding support
        if result:
            result.sort(key=lambda e: e['goodness'])
            # for now we assume that this contour is part of the pupil
            the_one = result[0]
            # (center, size, angle) = cv2.fitEllipse(the_one['contour'])
            # print "itself"
            distances =  dist_pts_ellipse(cv2.fitEllipse(the_one['contour']),the_one['contour'])
            # print np.average(distances)
            # print np.sum(distances)/float(distances.shape[0])
            # print "other"
            # if self._window:
                # cv2.polylines(debug_img,[result[-1]['contour']],isClosed=False,color=(255,255,255),thickness=3)
            with_another = np.concatenate((result[-1]['contour'],the_one['contour']))
            distances =  dist_pts_ellipse(cv2.fitEllipse(with_another),with_another)
            # if 1.5 > np.sum(distances)/float(distances.shape[0]):
            #     if self._window:
            #         cv2.polylines(debug_img,[result[-1]['contour']],isClosed=False,color=(255,255,255),thickness=3)

            perimeter_ratio =  cv2.arcLength(the_one["contour"],closed=False)/the_one['circumference']
            if perimeter_ratio > .9:
                size_thresh = 0
                eccentricity_thresh = 0
            elif perimeter_ratio > .5:
                size_thresh = the_one['major']/(5.)
                eccentricity_thresh = the_one['major']/2.
                self.should_sleep = True
            else:
                size_thresh = the_one['major']/(3.)
                eccentricity_thresh = the_one['major']/2.
                self.should_sleep = True
            if self._window:
                center = np.uint16(np.around(the_one['pupil_center']))
                cv2.circle(debug_img,tuple(center),int(eccentricity_thresh),(0,255,0),1)

            if self._window:
                cv2.polylines(debug_img,[the_one["contour"]],isClosed=False,color=(255,0,0),thickness=2)
                s = the_one["contour"].copy()
                s[:,:,0] +=coarse_pupil_width*2
                cv2.polylines(debug_img,[s],isClosed=False,color=(255,0,0),thickness=2)
            # but are there other segments that could be used for support?
            new_support = [the_one['contour'],]
            if len(result)>1:
                the_one = result[0]
                target_axes = the_one['axes'][0]
                # target_mean_curv = np.mean(curvature(the_one['contour'])
                for e in result:

                    # with_another = np.concatenate((e['contour'],the_one['contour']))
                    # with_another = np.concatenate([r['contour'] for r in result])
                    with_another = e['contour']
                    distances =  dist_pts_ellipse(cv2.fitEllipse(with_another),with_another)
                    # print np.std(distances)
                    thick =  int(np.std(distances))
                    if 1.5 > np.average(distances) or 1: # the 'or 1' keeps this debug branch always on
                        if self._window:
                            # print thick
                            thick = min(20,thick)
                            cv2.polylines(debug_img,[e['contour']],isClosed=False,color=(255,255,255),thickness=thick)

                    if self._window:
                        cv2.polylines(debug_img,[e["contour"]],isClosed=False,color=(0,100,100))
                    center_dist = cv2.arcLength(np.array([the_one["pupil_center"],e['pupil_center']],dtype=np.int32),closed=False)
                    size_dif = abs(the_one['major']-e['major'])

                    # let's make sure the contour is not behind the_one's contour
                    # center_point = np.uint16(np.around(the_one['pupil_center']))
                    # other_center_point = np.uint16(np.around(e['pupil_center']))

                    # mid_point =  the_one["contour"][the_one["contour"].shape[0]/2][0]
                    # other_mid_point =  e["contour"][e["contour"].shape[0]/2][0]

                    # #reflect around mid_point
                    # p = center_point - mid_point
                    # p = np.array((-p[1],-p[0]))
                    # mir_center_point = p + mid_point
                    # dist_mid = cv2.arcLength(np.array([mid_point,other_mid_point]),closed=False)
                    # dist_center = cv2.arcLength(np.array([center_point,other_mid_point]),closed=False)
                    # if self._window:
                    #     cv2.circle(debug_img,tuple(center_point),3,(0,255,0),2)
                    #     cv2.circle(debug_img,tuple(other_center_point),2,(0,0,255),1)
                    #     # cv2.circle(debug_img,tuple(mir_center_point),3,(0,255,0),2)
                    #     # cv2.circle(debug_img,tuple(mid_point),2,(0,255,0),1)
                    #     # cv2.circle(debug_img,tuple(other_mid_point),2,(0,0,255),1)
                    #     cv2.polylines(debug_img,[np.array([center_point,other_mid_point]),np.array([mid_point,other_mid_point])],isClosed=False,color=(0,255,0))


                    if center_dist < eccentricity_thresh:
                    # print dist_mid-dist_center
                    # if dist_mid > dist_center-20:

                        if size_dif < size_thresh:
                            new_support.append(e["contour"])
                            if self._window:
                                cv2.polylines(debug_img,[e["contour"]],isClosed=False,color=(255,0,0),thickness=1)
                                s = e["contour"].copy()
                                s[:,:,0] +=coarse_pupil_width*2
                                cv2.polylines(debug_img,[s],isClosed=False,color=(255,255,0),thickness=1)

                        else:
                            if self._window:
                                s = e["contour"].copy()
                                s[:,:,0] +=coarse_pupil_width*2
                                cv2.polylines(debug_img,[s],isClosed=False,color=(0,0,255),thickness=1)
                    else:
                        if self._window:
                            cv2.polylines(debug_img,[e["contour"]],isClosed=False,color=(0,255,255),thickness=1)

                    # new_support = np.concatenate(new_support)

            self.goodness.value = the_one['goodness']

            ###here we should AND the original mask with the selected contours drawn at 2px thickness (and a 2px fitted ellipse - is the last one a good idea??)
            support_mask = np.zeros(edges.shape,edges.dtype)
            cv2.polylines(support_mask,new_support,isClosed=False,color=(255,255,255),thickness=2)
            # draw into the support mask with thickness 2
            new_edges = cv2.min(edges, support_mask)
            new_contours = cv2.findNonZero(new_edges)
            if self._window:
                debug_img[0:support_mask.shape[0],0:support_mask.shape[1],2] = new_edges


            ###### do the ellipse fit and filter think again
            ellipses = ((cv2.fitEllipse(c),c) for c in [new_contours])
            ellipses = ((e,c) for e,c in ellipses if (padding < e[0][1] < shape[0]-padding and padding< e[0][0] < shape[1]-padding)) # center is close to roi center
            ellipses = ((e,c) for e,c in ellipses if binary_img[int(e[0][1]),int(e[0][0])]) # center is on a dark pixel
            ellipses = [(size_deviation(e,self.target_size.value),e,c) for e,c in ellipses if is_round(e,self.target_ratio)] # roundness test
            for size_dif,e,c in ellipses:
                pupil_ellipse = {}
                pupil_ellipse['contour'] = c
                a,b = e[1][0]/2.,e[1][1]/2. # major and minor radii of the candidate ellipse
                # pupil_ellipse['circumference'] = np.pi*abs(3*(a+b)-np.sqrt(10*a*b+3*(a**2+b**2)))
                # pupil_ellipse['convex_hull'] = cv2.convexHull(pupil_ellipse['contour'])
                pupil_ellipse['contour_area'] = cv2.contourArea(cv2.convexHull(c))
                pupil_ellipse['ellipse_area'] = np.pi*a*b
                # print abs(pupil_ellipse['contour_area']-pupil_ellipse['ellipse_area'])
                if abs(pupil_ellipse['contour_area']-pupil_ellipse['ellipse_area']) <10:
                    pupil_ellipse['goodness'] = 0 #perfect match we'll take this one
                else:
                    pupil_ellipse['goodness'] = size_dif
                if visualize:
                        pass
                        # cv2.drawContours(pupil_img,[cv2.convexHull(c)],-1,(size_dif,size_dif,255))
                        # cv2.drawContours(pupil_img,[c],-1,(size_dif,size_dif,255))
                pupil_ellipse['center'] = u_roi.add_vector(p_roi.add_vector(e[0])) # compensate for roi offsets
                pupil_ellipse['angle'] = e[-1]
                pupil_ellipse['axes'] = e[1]
                pupil_ellipse['major'] = max(e[1])
                pupil_ellipse['minor'] = min(e[1])
                pupil_ellipse['ratio'] = pupil_ellipse['minor']/pupil_ellipse['major']
                pupil_ellipse['norm_pupil'] = normalize(pupil_ellipse['center'], (img.shape[1], img.shape[0]),flip_y=True )
                pupil_ellipse['timestamp'] = frame.timestamp
                result = [pupil_ellipse,]
            # the_new_one = result[0]

            #done - if the new ellipse is good, we just overwrote the old result

        if self._window:
            self.gl_display_in_window(debug_img)
            if self.should_sleep:
                # sleep(3)
                self.should_sleep = False
        if result:
            # update the target size
            if result[0]['goodness'] >=3: # perfect match!
                self.target_size.value = result[0]['major']
            else:
                self.target_size.value  = self.target_size.value +  .2 * (result[0]['major']-self.target_size.value)
                result.sort(key=lambda e: abs(e['major']-self.target_size.value))
            if visualize:
                pass
            return result[0]

        else:
            self.goodness.value = 100
            no_result = {}
            no_result['timestamp'] = frame.timestamp
            no_result['norm_pupil'] = None
            return no_result
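
The histogram step near the top of detect() picks the binarization threshold from the darkest intensity spike. A standalone sketch of that idea (illustrative names; the offset argument here plays the role of self.intensity_range.value, and the fallback mirrors the code above):

import numpy as np
import cv2

def pick_dark_threshold(gray, min_count=40, offset=11, fallback=200):
    # histogram over all 256 intensities
    hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
    bins = np.arange(hist.shape[0])
    # every intensity seen in more than min_count pixels
    spikes = bins[hist[:, 0] > min_count]
    lowest_spike = int(spikes.min()) if spikes.shape[0] > 0 else fallback
    return lowest_spike + offset

Esempio n. 52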
def main(num):
    print num
    img = cv2.imread('Sample Images/sample ('+str(num)+').jpg') 

    imgEdges = cv2.Canny(img,100,200)
    
    #ret,imgThresh1 = cv2.threshold(imgEdges,150,255,cv2.THRESH_BINARY_INV)

    #imgLaplacian = cv2.Laplacian(img,cv2.CV_64F)
    # imgSobelx = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5)
    # imgSobely = cv2.Sobel(img,cv2.CV_64F,0,1,ksize=5)
    
    # for i in range(0,img.shape[1]):
    #   for j in range(0,img.shape[0]):
            
    #       #b1,g1,r1 = img[j,i]

    #       imgTransformed[j,i][0] = math.sqrt((imgSobely[j,i][0]*imgSobely[j,i][0]) + (imgSobelx[j,i][0]*imgSobelx[j,i][0]))
    #       imgTransformed[j,i][1] = math.sqrt((imgSobely[j,i][1]*imgSobely[j,i][1]) + (imgSobelx[j,i][1]*imgSobelx[j,i][1]))
    #       imgTransformed[j,i][2] = math.sqrt((imgSobely[j,i][2]*imgSobely[j,i][2]) + (imgSobelx[j,i][2]*imgSobelx[j,i][2]))
    
    # print imgTransformed
    
    # imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # imgBlurred = cv2.GaussianBlur(imgGray, (5,5), 0)
    
    #imgThresh = cv2.adaptiveThreshold(imgEdges,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY_INV,11,2)
    
    ret,imgThresh = cv2.threshold(imgEdges,127,255,cv2.THRESH_BINARY_INV)
    
    kernel = np.ones((3,3),np.uint8) # note: kernel is never defined in the original snippet; a 3x3 box kernel is an assumption
    erosion = cv2.erode(imgThresh,kernel,iterations = 1)
    erosion = cv2.medianBlur(erosion, 3)
    erosionCopy = erosion.copy()
    #img1 = cv2.cvtColor(erosion, cv2.COLOR_GRAY2RGB)
    # dilation = cv2.dilate(imgThresh, kernel, iterations = 1)

    # opening = cv2.dilate(erosion, kernel, iterations = 1)
    # closing = cv2.erode(dilation, kernel, iterations = 1)
        
    npaContours, npaHierarchy = cv2.findContours(erosion,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

    #print len(npaContours)
    # for c in npaContours:
    #     if cv2.contourArea(c)>10 and cv2.contourArea(c)<8000: 
    #         [intX, intY, intWidth, intHeight] = cv2.boundingRect(c)

    #         crop = erosionCopy[intY:intY+intHeight,intX:intX+intWidth]    
            
    #         cv2.rectangle(img,(intX, intY),(intX + intWidth, intY + intHeight),(127, 255, 0),1)
    #         # cv2.imshow('crop',crop)
    #         # cv2.waitKey(0)

    pixelpoints = 0
    #filterContour(npaContours)
    mask = np.zeros(imgThresh.shape,np.uint8)
    mask_testing = np.zeros(imgThresh.shape,np.uint8)
    heri_prev1 = npaHierarchy[0][0][2]
    heri_prev2 = npaHierarchy[0][0][3]


####################################################################################################################################################

    for c in range (0,len(npaContours)):
        flag_wrong_contour = 0
        if cv2.contourArea(npaContours[c])>10 and cv2.contourArea(npaContours[c])<8000:
            heri_next = npaHierarchy[0][c][3]
            #print cv2.contourArea(npaContours[c]),npaHierarchy[0][c]

            if (heri_prev1 - heri_next == 1):
                #print 'yoo baby'
                cv2.drawContours(mask,[npaContours[c]],0,0,-1)
                pixelpoints = cv2.findNonZero(mask)
                for i in range(len(pixelpoints)):
                 
                    if pixelpoints[i][0][0] == 1 or pixelpoints[i][0][0] == 198 or pixelpoints[i][0][1] == 1 or pixelpoints[i][0][1] == 198:

                        if erosionCopy[pixelpoints[i][0][1],pixelpoints[i][0][0]] == 255:
                            cv2.circle(mask,(pixelpoints[i][0][0],pixelpoints[i][0][1]),1,128,-1)
                            flag_wrong_contour = 1               
                
                if flag_wrong_contour == 0:   
                    cv2.drawContours(mask_testing,[npaContours[c]],0,0,-1)
                
            ### Pixel points of a contour
            else:
                if cv2.contourArea(npaContours[c])>=200:
                    mask = np.zeros(imgThresh.shape,np.uint8)
                    cv2.drawContours(mask,[npaContours[c]],0,255,-1)
                    pixelpoints = cv2.findNonZero(mask)
                    for i in range(len(pixelpoints)):
                        
                        if pixelpoints[i][0][0] == 1 or pixelpoints[i][0][0] == 198 or pixelpoints[i][0][1] == 1 or pixelpoints[i][0][1] == 198:
    
                            if erosionCopy[pixelpoints[i][0][1],pixelpoints[i][0][0]] == 255:
                                cv2.circle(mask,(pixelpoints[i][0][0],pixelpoints[i][0][1]),1,128,-1)
                                flag_wrong_contour = 1               
                    if flag_wrong_contour == 0:     
                        cv2.drawContours(mask_testing,[npaContours[c]],0,255,-1)
                    
                    pixelpoints = cv2.findNonZero(mask)
            
            #cv2.imshow('mask',mask_testing)
            #cv2.imshow('erosionCopy',erosionCopy)

            #print pixelpoints
            #cv2.waitKey(0)
        heri_prev1 = npaHierarchy[0][c][2]    
        heri_prev2 = npaHierarchy[0][c][3]
    
    ret,mask_testing = cv2.threshold(mask_testing,127,255,cv2.THRESH_BINARY_INV)

#################################################################################################################################################################


    #     for i in range(200):
    #         for j in range(200):
    #             if cv2.contourArea(npaContours[c])>200 and cv2.contourArea(npaContours[c])<8000:
    #                 dist = cv2.pointPolygonTest(npaContours[c],(j,i),True)
    #                 if dist >= 0:
    #                     erosionCopy[i,j] = 127

    # for i in range(200):
    #     for j in range(200):

    #         if erosionCopy[i,j] < 50:
    #             erosionCopy[i,j] = 255

    # print erosion
    # print erosion.shape
    
##### OPERATION AFTER FIRST SET COMPLETED

    print "Pass 1 Completed.."
    img_pass1 = mask_testing.copy()
    img_pass1Copy = img_pass1.copy()
    npaContours, npaHierarchy = cv2.findContours(img_pass1,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    
    #print len(npaContours)

    # for c in npaContours:
    #     if cv2.contourArea(c)>10 and cv2.contourArea(c)<8000: 
    #         [intX, intY, intWidth, intHeight] = cv2.boundingRect(c)

    #         crop = img_pass1Copy[intY:intY+intHeight,intX:intX+intWidth]    

    #         cv2.rectangle(img,(intX, intY),(intX + intWidth, intY + intHeight),(127, 255, 0),1)
    #         cv2.imshow('crop',crop)
    #         cv2.waitKey(0)

    r = [0,0]
    flag = 0 
    boundary_points = []
    flag_mid = 0
    for c in range(1,len(npaContours)):
        
        mask = np.zeros(img_pass1Copy.shape,np.uint8)
        cv2.drawContours(mask,npaContours,c,127,1)

        for i in range(0,mask.shape[0]):
            for j in range(0,mask.shape[1]):
                if img_pass1Copy[j,i] == 255 and mask[j,i] == 127:
                    boundary_points.append([j,i])
                    print erosionCopy[j,i]
                    if erosionCopy[j,i] == 0: # WORKING HERE !!!!!!!!!!!!!!!
                        print 'yeah'
                    cv2.circle(erosionCopy,(i,j),1,200,-1)
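
The border test in Pass 1 above can be expressed more directly. A hedged sketch (not the original code) that flags a contour whose filled mask overlaps foreground pixels on the image border:

import numpy as np
import cv2

def touches_border(contour, binary_img, margin=1):
    mask = np.zeros(binary_img.shape, np.uint8)
    cv2.drawContours(mask, [contour], 0, 255, -1)
    pts = cv2.findNonZero(cv2.min(mask, binary_img))
    if pts is None:
        return False
    h, w = binary_img.shape
    xs, ys = pts[:, 0, 0], pts[:, 0, 1]
    return bool(np.any((xs <= margin) | (xs >= w - 1 - margin) |
                       (ys <= margin) | (ys >= h - 1 - margin)))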
Esempio n. 53
def process_frame_diff_optical(video_path):
    cap = cv2.VideoCapture(video_path)
    (background_model, _) = grab_and_convert_frame(cap)
    # No more frames left to grab or something went wrong
    if background_model is None:
        return

    dilation_kernel = np.ones((3, 3), np.uint8)
    # Parameters for lucas kanade optical flow
    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    # Create some random colors
    color = np.random.randint(0, 255, (100, 3))

    feature_params = dict(maxCorners=100,
                          qualityLevel=0.3,
                          minDistance=7,
                          blockSize=7)

    while True:
        frame, orig = grab_and_convert_frame(cap)
        if frame is None:
            break

        # calculate the difference
        delta = cv2.absdiff(frame, background_model)
        thresh = cv2.threshold(delta, 50, 255, cv2.THRESH_BINARY)[1]
        dilation = cv2.dilate(thresh, dilation_kernel, iterations=1)
        nonzeros = cv2.findNonZero(dilation)

        frame_changed = frame.copy()
        # Create a mask image for drawing purposes
        mask = np.zeros_like(background_model)
        if nonzeros is not None and len(nonzeros) > 0:
            nonzeros = np.float32(nonzeros)
            p1, st, err = cv2.calcOpticalFlowPyrLK(background_model, frame_changed, nonzeros, None, **lk_params)

            # Select good points
            good_new = p1[st == 1]
            good_old = nonzeros[st == 1]

            # draw the tracks
            for i, (new, old) in enumerate(zip(good_new, good_old)):
                # print(i, (new, old))
                a, b = new.ravel()
                c, d = old.ravel()
                cv2.line(mask, (a, b), (c, d), color[i % 100].tolist(), 2)
                cv2.circle(frame_changed, (a, b), 5, color[i % 100].tolist(), -1)

            frame_changed = cv2.add(frame_changed, mask)

        # display frames
        cv2.imshow("Current frame", frame_changed)
        # cv2.imshow("Background model", background_model)
        cv2.imshow("Diff", dilation)

        # current frame becomes background model
        background_model = frame

        # break loop on user input
        k = cv2.waitKey(1) & 0xff
        if k == ord("q"):
            break
        elif k == ord('p'):
            cv2.imwrite("test_frame_" + str(time.strftime("%d-%m-%Y-%H-%M-%S")) + ".png", frame)
            cv2.imwrite("test_thresh_" + str(time.strftime("%d-%m-%Y-%H-%M-%S")) + ".png", dilation)

    cap.release()
    cv2.destroyAllWindows()
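
grab_and_convert_frame is not defined in this example. Judging from how it is used (it returns a processed frame plus the original, and None when the stream ends), a plausible minimal version could look like this; the grayscale/blur choices are assumptions:

import cv2

def grab_and_convert_frame(cap):
    ret, frame = cap.read()
    if not ret:
        return None, None
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    return gray, frame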
Esempio n. 54
def extract_char(img, num=True):
    """
    Takes a block of handwritten characters, crops each box, and saves the character images for recognition.
    :param img: input image of block of handwritten characters.
    :param num: if the characters are numeric.
    :return: a list of indices of where the spaces occur in the input.
    """
    img_final_bin = find_boxes(img)

    # Find contours for image, which should detect all the boxes
    contours, hierarchy = cv2.findContours(img_final_bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    (contours, boundingBoxes) = sort_contours(contours)

    # Find the convex hull of each contour to get the correct outline of the box/square
    new_contours = []
    for k in range(len(contours)):
        new_contours.append(cv2.convexHull(contours[k], returnPoints=True))

    if num:
        cropped_dir_path = "/home/vagrant/Markus/lib/scanner/nums/1/"
    else:
        cropped_dir_path = "/home/vagrant/Markus/lib/scanner/names/1/"

    # Remove previous images
    olddir = cropped_dir_path + "*"
    r = glob.glob(olddir)
    for png in r:
        os.remove(png)

    box_num = 0
    reached_x = 0
    spaces = []

    for c in new_contours:
        x, y, w, h = cv2.boundingRect(c)
        # check if box's edges are less than half the height of image (not likely to be a box with handwritten char)
        if w < np.array(img).shape[0] // 2 or h < np.array(img).shape[0] // 2:
            continue
        # check if this is an inner contour whose area has already been covered by another contour
        if x + w // 2 < reached_x:
            continue
        # check the contour has a square-like shape
        if abs(w - h) < abs(min(0.5*w, 0.5*h)):
            box_num += 1
            cropped = img[y:y + h, x:x + w]
            resized = cv2.resize(cropped, (28, 28))

            # check if this is an empty box (space)
            pts = cv2.findNonZero(resized)
            if pts is None or len(pts) < 40:
                spaces.append(box_num)
                continue

            if num:
                new_img = process_num(cropped)
            else:
                new_img = process_char(cropped)
            cv2.imwrite(cropped_dir_path + str(box_num).zfill(2) + '.png', new_img)
            reached_x = x + w

    return spaces
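
sort_contours is an external helper here. A plausible left-to-right version, sketched under that assumption (the original may sort differently):

import cv2

def sort_contours(contours):
    bounding_boxes = [cv2.boundingRect(c) for c in contours]
    order = sorted(range(len(contours)), key=lambda i: bounding_boxes[i][0])
    return ([contours[i] for i in order],
            [bounding_boxes[i] for i in order])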
Esempio n. 55
def main():
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    #outGrid = cv2.VideoWriter('sep/0_0.grid.avi', fourcc, 20.0, (1280, 720))
    #outFull = cv2.VideoWriter('sep/0_0.full.avi', fourcc, 20.0, (1280, 720))
    #cap = cv2.VideoCapture('sep/0_0.avi')
    #outGrid = cv2.VideoWriter('sep/25_68351.grid.avi',fourcc, 20.0, (1280,720))
    #outFull = cv2.VideoWriter('sep/25_68351.full.avi',fourcc, 20.0, (1280,720))
    cap = cv2.VideoCapture('sep/25_68351.avi')
    fn = 0
    ret, iframe = cap.read()
    H, W, _ = iframe.shape
    tpl = template()
    M = initM()

    visTpl = cv2.warpPerspective(tpl, visM(), (1280, 720))
    cRot = cv2.warpPerspective(tpl, M, (1280, 720))
    fullCourt = []
    fullImg = np.zeros_like(iframe)

    m = np.eye(3)
    tic = time.clock()
    MS = [M]
    Theta = 10
    #params = np.array([ [1.1, .9, 1.2], [.9, 1.1, 1.2], [.9, .9, 1] ])
    while(cap.isOpened()):
        ret, frame = cap.read()
        if not ret:
            break

        fn += 1
        # if fn%2 == 0: continue
        # if fn % 6 == 0: continue
        frameHSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        threshColor = cv2.inRange(
            frameHSV, np.array([0, 47, 151]), np.array([16, 255, 255]))
        threshColor = cv2.morphologyEx(
            threshColor, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))
        edges = cv2.Canny(threshColor, 200, 255, apertureSize=3)
        edges[565:650, 240:950] = 0
        #frame[565:650, 240:950] = 0
        cv2.circle(edges, (1042, 620), 29, 0, -1)

        dstIdx = cv2.findNonZero(edges).reshape(-1, 2)
        if len(dstIdx) < 5000:
            MS.append(MS[-1])
            continue
        nbrs = NearestNeighbors(
            n_neighbors=1, radius=1.0, algorithm='auto').fit(dstIdx)

        cnt = Theta
        converge = 100

        while cnt:
            img = frame.copy()
            cnt -= 1
            #blank = np.zeros_like(frame)

            cv2.warpPerspective(tpl, np.dot(m, M), (W, H), dst=cRot)
            # if len(nKeys) < 8000: break
            if fn == 1:
                oKeys, nKeys = neighbors(cRot, dstIdx, nbrs, d=10)
                dm, ret = cv2.findHomography(oKeys, nKeys, method=cv2.LMEDS)
            else:
                oKeys, nKeys = neighbors(cRot, dstIdx, nbrs, d=10)
                dm = estimate(oKeys, nKeys)
                #dm = ransac(oKeys, nKeys)
            
            print dm
            #import ipdb;
            # ipdb.set_trace()
            if dm is None:
                dm = np.eye(3)

            #converge = np.linalg.norm(dm - np.eye(3))

            # if converge < 0.45:
             #   break
            #dm = 1.2 * (dm - np.eye(3)) + np.eye(3)
            m = np.dot(dm, m)
            m = m / m[2, 2]
            img[...,1] = cv2.bitwise_or(cRot, img[...,1])
            for o, n in zip(oKeys, nKeys):
                cv2.line(img, (o[0], o[1]), (n[0], n[1]), (255,255,255), 1)
            while False:
                cv2.imshow('edges', edges)
                key = cv2.waitKey(5) & 0xFF
                if key == ord('q'):
                    return
                if key == ord('a'):
                    break
                if key == ord('c'):
                    cnt = False
                    break

        # fold the per-frame update m into the template-to-frame homography
        # and renormalize so that M[2,2] stays 1
        M = np.dot(m, M)
        M = M / M[2, 2]
        MS.append(M)

        alpha = np.sqrt(m[0, 2] * m[2, 0])
        gamma = np.sqrt(-m[1, 2] * m[2, 1])
        f = - m[1, 2] / gamma
        r = m[0, 2] / (alpha * f)

        # print converge
        # print 50 - cnt
        if fn > 2:
            m = .6 * (m - np.eye(3)) + np.eye(3)
        else:
            m = np.eye(3)
        img[..., 1] = cv2.bitwise_or(cRot, img[..., 1])

        inv = cv2.warpPerspective(frame, np.dot(M, np.linalg.inv(visM())),
                                  (W, H), flags=cv2.WARP_INVERSE_MAP,
                                  borderMode=cv2.BORDER_CONSTANT,
                                  borderValue=(0, 0, 0))
        fmask = cv2.warpPerspective(np.zeros_like(cRot),
                                    np.dot(M, np.linalg.inv(visM())),
                                    (W, H), flags=cv2.WARP_INVERSE_MAP,
                                    borderMode=cv2.BORDER_CONSTANT,
                                    borderValue=255)
        #inv[...,1] = cv2.bitwise_or(visTpl, inv[...,1])

        fullT1 = cv2.bitwise_and(fullImg, fullImg, mask=fmask)

        fullImg = cv2.addWeighted(fullImg, 0.99, inv, 0.01, 0.45)
        #fullImg = inv.copy()
        fmaskI = cv2.bitwise_not(fmask)
        fullImg = cv2.bitwise_or(fullImg, fullT1)

        visImg = cv2.bitwise_and(inv, inv, mask=fmaskI)
        bg = cv2.bitwise_and(fullImg, fullImg, mask=fmask)
        visImg = cv2.add(visImg, bg)
        visImg[..., 1] = cv2.bitwise_or(visTpl, visImg[..., 1])
        toc = time.clock()

        # sys.stdout.write("\rI[%s] #%s %.4f %.4f sec/frame\n" %
        #                  (Theta - cnt, fn, converge, (toc - tic) / fn))
        # sys.stdout.write("\r%.4f %.4f %.4f %.4f" % (alpha, gamma, f, r))
        # sys.stdout.flush()
        cv2.putText(img, "[%d]#f %d %.2f %.2f sec/frame" % (Theta - cnt, fn, converge,
                                                            (toc - tic) / fn), (10, 30), FONT, 1, (255, 255, 255), 1, cv2.LINE_AA)
        cv2.imshow('frame', img)
        cv2.putText(visImg, "[%d]#f %d %.2f %.2f sec/frame" % (Theta - cnt, fn, converge,
                                                               (toc - tic) / fn), (10, 600), FONT, 1, (255, 255, 255), 1, cv2.LINE_AA)
        cv2.imshow('inv', visImg)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            return
        # outGrid.write(img)
        # outFull.write(visImg)
    MS = np.array(MS)
Esempio n. 56
#cv2.destroyWindow('window')

# normalized image difference
mFrame1 = cv2.cvtColor(mFrame1, cv2.COLOR_BGR2GRAY)
mFrame2 = cv2.cvtColor(mFrame2, cv2.COLOR_BGR2GRAY)
mFrame1 = cv2.GaussianBlur(mFrame1,(5,5),3)
mFrame2 = cv2.GaussianBlur(mFrame2,(5,5),3)

frameAvg = (np.double(mFrame1) + np.double(mFrame2))/2
frameDiff1 = cv2.absdiff(mFrame1,mFrame2)
retval, frameDiff2 = cv2.threshold(frameDiff1, 25, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
#frameDiff3 = cv2.dilate(frameDiff2, None, iterations = 1)
frameDiff4 = cv2.erode(frameDiff2, None, iterations = 2)
#im, contours, _ = cv2.findContours(frameDiff4.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#contours2 = sorted(contours,key=cv2.contourArea,reverse=True)[0]
pixelPoints = cv2.findNonZero(frameDiff4)
pixelPoints2 = pixelPoints[:,0]
#plt.plot(pixelPoints2[:,0],pixelPoints2 [:,1])
#plt.show()
hull = cv2.convexHull(pixelPoints)

cv2.drawContours(frameDiff1,[hull],-1,(255,255,255),5)

cv2.imshow('window',frameDiff1)
cv2.waitKey(0)
cv2.destroyWindow('window')
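
As a small follow-up (not in the original snippet), the same non-zero points can also give an axis-aligned bounding box of the motion region:

x, y, w, h = cv2.boundingRect(pixelPoints)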




def auto_crop(image_source):
	"""Return a rotated and cropped version of the source image"""
	
	# First slightly crop edge - some images had a rogue 2 pixel black edge on one side
	init_crop = 10
	h, w = image_source.shape[:2]
	image_source = image_source[init_crop:init_crop+(h-init_crop*2), init_crop:init_crop+(w-init_crop*2)]
	# Add back a white border
	
	image_source = cv2.copyMakeBorder(image_source, 5,5,5,5, cv2.BORDER_CONSTANT, value=(255,255,255))
	
	image_gray = cv2.cvtColor(image_source, cv2.COLOR_BGR2GRAY)
	_, image_thresh = cv2.threshold(image_gray, THRESHOLD, 255, cv2.THRESH_BINARY)
	
	image_thresh2 = image_thresh.copy()
	image_thresh2 = cv2.Canny(image_thresh2, 100, 100, apertureSize=3)

	points = cv2.findNonZero(image_thresh2)
	rect = cv2.minAreaRect(points)
	centre, dimensions, theta = rect
	
	width = int(dimensions[0])
	height = int(dimensions[1])
	
	box = cv2.boxPoints(rect)
	box = np.int0(box)
	
	M = cv2.moments(box)	
	cx = int(M['m10']/M['m00'])
	cy = int(M['m01']/M['m00'])
	
	image_patch = sub_image(image_source, (cx, cy), theta+90, height, width)
	
	# add back a small white border
	image_patch = cv2.copyMakeBorder(image_patch, 1,1,1,1, cv2.BORDER_CONSTANT, value=(255,255,255))
	
	# Convert image to binary, edge is black. Do edge detection and convert edges to a list of points.
	# Then calculate a minimum set of points that can enclose the points.
	
	_, image_thresh = cv2.threshold(image_patch, THRESHOLD, 255, 1)
	image_thresh = cv2.Canny(image_thresh, 100, 100, 3)
	points = cv2.findNonZero(image_thresh)
	hull = cv2.convexHull(points)
	
	# Find min epsilon resulting in exactly 4 points, typically between 7 and 21
	# This is the smallest set of 4 points to enclose the image.
	for epsilon in range(3, 50):
		hull_simple = cv2.approxPolyDP(hull, epsilon, 1)
		
		if len(hull_simple) == 4:
			break

	hull = hull_simple

	# Find closest fitting image size and warp/crop to fit, i.e. reduce scaling to a minimum.
	
	x,y,w,h = cv2.boundingRect(hull)
	target_corners = np.array([[0,0],[w,0],[w,h],[0,h]], np.float32)
	
	# Sort hull into tl,tr,br,bl order. 
	# n.b. hull is already sorted in clockwise order, we just need to know where top left is.
	
	source_corners = hull.reshape(-1,2).astype('float32')
	min_dist = 100000
	index = 0
	
	for n in xrange(len(source_corners)):
		x,y = source_corners[n]
		dist = math.hypot(x,y)
		
		if dist < min_dist:
			index = n
			min_dist = dist
	
	# Rotate the array so tl is first
	source_corners = np.roll(source_corners , -(2*index))
	
	try:
		transform = cv2.getPerspectiveTransform(source_corners, target_corners)
		return cv2.warpPerspective(image_patch, transform, (w,h))
		
	except cv2.error:
		print "Warp failure"
		return image_patch
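
sub_image is not defined in this snippet. Given the call sub_image(image_source, (cx, cy), theta+90, height, width), a plausible version rotates the whole image about the centre and crops the now axis-aligned patch (a sketch under that assumption; the angle convention may differ from the original):

import cv2

def sub_image(image, centre, theta, height, width):
	# rotate so the patch becomes axis-aligned, then crop around the centre
	rot = cv2.getRotationMatrix2D(centre, theta, 1.0)
	rotated = cv2.warpAffine(image, rot, (image.shape[1], image.shape[0]))
	x = int(centre[0] - width / 2)
	y = int(centre[1] - height / 2)
	return rotated[y:y + height, x:x + width]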
############part2
#morphological operations to find boundary of foreground or background of fundus image

kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(20,20)) # structuring element of size 20*20
mask = cv2.erode(mask,kernel,iterations = 1) # erosion of the image

kernel1 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)) # structuring element of size 3*3
bound_mask = cv2.erode(mask,kernel1) # erosion of the image

boundary = mask & (~bound_mask) # extracted boundary image


############part3

non_zero = cv2.findNonZero(boundary) # find non-zero pixel locations on the boundary
row,row_r,row_c = non_zero.shape
# rearranging the non_zero matrix into an N x 2 matrix

n_z = np.zeros((row,row_c),np.float32)

for i in xrange(row):
    for j in xrange(row_c):
        n_z[i,j]=non_zero[i][0,j]
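# note: the loop above is equivalent to n_z = non_zero.reshape(-1, 2).astype(np.float32)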
        
#morphological operations
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(1,1)) # structuring element
mask_eroded = cv2.erode(mask,kernel,iterations = 1) # erosion of the image

non_zero_1 = cv2.findNonZero(~mask)
row_1,row_r_1,row_c_1 = non_zero_1.shape