Example #1
def getFlyContours(img):
   minFlyArea = flyTrackerSettings.minFlyArea     # 900
   minFlyAreaNorm = flyTrackerSettings.minFlyAreaNorm # 0.0045
   arenaCoords = flyTrackerSettings.arenaCoords
   nFlies = flyTrackerSettings.nFlies
   # maxFlyAreaNorm = 0.02
   if cv2to3.isCV2(): # opencv2
      contours, hierarchy = cv2.findContours(img,mode=cv2.RETR_EXTERNAL,method=cv2.CHAIN_APPROX_SIMPLE) # RETR_EXTERNAL? use sure_bg?
   else: # opencv3+
      image, contours, hierarchy = cv2.findContours(img,mode=cv2.RETR_EXTERNAL,method=cv2.CHAIN_APPROX_SIMPLE) # RETR_EXTERNAL? use sure_bg?

   area = [0]*len(contours)
   for idx,cnt in enumerate(contours):
      area[idx] = cv2.contourArea(cnt)

   arenaArea = abs((arenaCoords[1]-arenaCoords[3])*(arenaCoords[0]-arenaCoords[2]))
   index = 0
   for cntInd,cnt in enumerate(contours):
      # print(area[cntInd]/arenaArea)
      if area[cntInd]/arenaArea > minFlyAreaNorm:
         contours[index]=contours[cntInd]
         index = index + 1
   contours = contours[0:index]

   return contours
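Many of the examples on this page branch on the OpenCV version because cv2.findContours returned (contours, hierarchy) in OpenCV 2.x (and again in 4.x) but (image, contours, hierarchy) in 3.x. A minimal version-agnostic wrapper, offered as an illustrative sketch rather than part of the example above:

import cv2

def find_contours(img, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_SIMPLE):
    """Return (contours, hierarchy) on any OpenCV major version."""
    result = cv2.findContours(img, mode, method)
    if len(result) == 3:   # OpenCV 3.x: (image, contours, hierarchy)
        return result[1], result[2]
    return result          # OpenCV 2.x / 4.x: (contours, hierarchy)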
Example #2
def border_mask(img, p1, p2, device, debug, color="black"):
  # using rectangle_mask to mask the edge of the plotting region misses the border of the image by 1 pixel;
  # this function fills that border region in
  # note that p1 = (0,0) is the top-left corner and p2 = (max-value(x), max-value(y)) is the bottom-right corner
  # device = device number, used to count steps in the pipeline
  # debug = True/False; if True, print the output image
  if color=="black":
    ix, iy = np.shape(img)
    size = ix,iy
    bnk = np.zeros(size, dtype=np.uint8)
    cv2.rectangle(img = bnk, pt1 = p1, pt2 = p2, color = (255,255,255))
    ret, bnk = cv2.threshold(bnk,127,255,0)
    contour,hierarchy = cv2.findContours(bnk,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(bnk, contour, -1 ,(255,255,255), 5)
    device +=1
  if color=="gray":
    ix, iy = np.shape(img)
    size = ix,iy
    bnk = np.zeros(size, dtype=np.uint8)
    cv2.rectangle(img = bnk, pt1 = p1, pt2 = p2, color = (192,192,192))
    ret, bnk = cv2.threshold(bnk,127,255,0)
    contour,hierarchy = cv2.findContours(bnk,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(bnk, contour, -1 ,(192,192,192), 5)
    device +=1
  if debug:
    print_image(bnk, (str(device) + '_brd_mskd_' + '.png'))
  return device, bnk, contour, hierarchy
Example #3
def get_motions(f, fMask, thickness=1, color=(170, 170, 170)):
    '''
    Iterates over the contours in a mask and draws a bounding box
    around the ones that encompass an area greater than a threshold
    (contourThresh). Returns an image containing just the drawn
    boxes (black background) and a list of the bounding rectangles.
    '''
    rects_mot = []
    f_rects = np.zeros(f.shape, np.uint8)
    # get contours
    if imutils.is_cv3():
        _, cnts, hierarchy = cv2.findContours(
            fMask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    elif imutils.is_cv2():
        cnts, hierarchy = cv2.findContours(
            fMask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # loop over the contours
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < contourThresh:
            continue

        if imutils.is_cv3():
            box = cv2.boxPoints(cv2.minAreaRect(c))
        elif imutils.is_cv2():
            box = cv2.cv.BoxPoints(cv2.minAreaRect(c))

        box = np.int0(box)
        cv2.drawContours(f_rects, [box], 0, color, thickness)
        rects_mot.append(cv2.boundingRect(c))
    return f_rects, rects_mot
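The rotated-rectangle corners above have the same version split: OpenCV 3+ exposes cv2.boxPoints, while 2.x had cv2.cv.BoxPoints. A feature-test helper is one way to hide it; this is a sketch, and the name box_points is illustrative:

import cv2
import numpy as np

def box_points(rect):
    """Corner points of a cv2.minAreaRect result, any OpenCV major version."""
    if hasattr(cv2, 'boxPoints'):               # OpenCV 3+
        return np.int0(cv2.boxPoints(rect))
    return np.int0(cv2.cv.BoxPoints(rect))      # OpenCV 2.x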
Example #4
def cropImage(edges, blur):
    contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # borders = find_border_components(contours, edges)
    # borders.sort(key=lambda (i, x1, y1, x2, y2): (x2 - x1) * (y2 - y1))
    # count=len(borders)
    # print len(contours),count
    # for i in range(0,count):
    #    index, left,top,right,bottom =borders[i]
    #    img_crop=cv2.getRectSubPix(img, (right-left, bottom-top), ((left+right)/2, (top+bottom)/2))
    #    cv2.imwrite('1right%d' % (left) +'.png', img_crop)

    # iand = cv2.bitwise_and(img,img,mask=edges)
    # contours, hierarchy = cv2.findContours(edges,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    # cv2.drawContours(edges,contours,-1,(255,255,255),-1)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (50, 50))
    closed = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)
    # closed = cv2.erode(closed, None, iterations = 4)
    closed = cv2.dilate(closed, None, iterations=13)
    # cv2.imwrite('closed.png',closed)
    (cnts, _) = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    for i in range(0, 1):
        if len(cnts) == 0:
            break
        if i == 1 and len(cnts) == 1:
            break
        c = cnts[i]
        # compute the rotated bounding box of the largest contour
        rect = cv2.minAreaRect(c)
        box = np.int0(cv2.cv.BoxPoints(rect))
        # cv2.drawContours(img, [box], -1, (0, 255, 0), 3)
        left, top, right, bottom = caculateRect(box)
        img_crop = cv2.getRectSubPix(blur, (right - left, bottom - top), ((left + right) / 2, (top + bottom) / 2))
        cv2.imwrite("right%d" % (left) + ".png", img_crop)
Example #5
def rectangle_mask(img, p1, p2, device, debug, color="black"):
  # takes an input image and returns a binary image masked by the rectangular area denoted by p1 and p2
  # note that p1 = (0,0) is the top-left corner and p2 = (max-value(x), max-value(y)) is the bottom-right corner
  # device = device number, used to count steps in the pipeline
  # debug = True/False; if True, print the output image
  # get the dimensions of the input image
  ix, iy = np.shape(img)
  size = ix,iy
  # create a blank image of same size
  bnk = np.zeros(size, dtype=np.uint8)
  # draw a rectangle denoted by pt1 and pt2 on the blank image
  
  if color=="black":
    cv2.rectangle(img = bnk, pt1 = p1, pt2 = p2, color = (255,255,255))
    ret, bnk = cv2.threshold(bnk,127,255,0)
    contour,hierarchy = cv2.findContours(bnk,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
    # make sure the entire rectangle is within (visible within) the plotting region or else it will not fill with thickness = -1
    # note that you should only print the first contour (contour[0]) if you want to fill with thickness = -1
    # otherwise two rectangles will be drawn and the space between them will get filled
    cv2.drawContours(bnk, contour, 0 ,(255,255,255), -1)
    device +=1
  if color=="gray":
    cv2.rectangle(img = bnk, pt1 = p1, pt2 = p2, color = (192,192,192))
    ret, bnk = cv2.threshold(bnk,127,255,0)
    contour,hierarchy = cv2.findContours(bnk,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
    # make sure the entire rectangle is within (visible within) the plotting region or else it will not fill with thickness = -1
    # note that you should only print the first contour (contour[0]) if you want to fill with thickness = -1
    # otherwise two rectangles will be drawn and the space between them will get filled
    cv2.drawContours(bnk, contour, 0 ,(192,192,192), -1)
    device +=1
  if debug:
    print_image(bnk, (str(device) + '_roi.png'))
  return device, bnk, contour, hierarchy
Example #6
    def trackRobot(self, imagePath):
        '''this function track the robot and return its coordinates'''
        img = cv2.imread(imagePath)
        img = cv2.flip(img, 1)
        img = cv2.flip(img, 0)

        # convert into hsv 
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

        # Find mask that matches 
        green_mask = cv2.inRange(hsv, np.array((50., 30., 0.)), np.array((100., 255., 255.)))
        green_mask = cv2.erode(green_mask, None, iterations=2)
        green_mask = cv2.dilate(green_mask, None, iterations=2)

        green_cnts = cv2.findContours(green_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
        green_c = max(green_cnts, key=cv2.contourArea)

        # fit an ellipse and use its orientation to gain info about the robot
        green_ellipse = cv2.fitEllipse(green_c)

        # This is the position of the robot
        green_center = (int(green_ellipse[0][0]), int(green_ellipse[0][1]))

        red_mask = cv2.inRange(hsv, np.array((0., 100., 100.)), np.array((80., 255., 255.)))
        red_mask = cv2.erode(red_mask, None, iterations=2)
        red_mask = cv2.dilate(red_mask, None, iterations=2)  # assumed dilate, mirroring the green-mask clean-up above

        red_cnts = cv2.findContours(red_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
        red_c = max(red_cnts, key=cv2.contourArea)

        red_ellipse = cv2.fitEllipse(red_c)
        red_center = (int(red_ellipse[0][0]), int(red_ellipse[0][1]))


        return green_center, red_center   
Example #7
def getContours(image):
    try:
        contours, hierarchy = cv2.findContours(image,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    except ValueError:  # OpenCV 3.x returns three values
        img, contours, hierarchy = cv2.findContours(image,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    return contours
def checkOreantation(img):

    LMG = []
    for name in ['5','twoTH','ThreeEN']:
        sample = cv2.imread(name+'.jpg')
        sample = cv2.cvtColor(sample, cv2.COLOR_BGR2GRAY)
        ret, sample = cv2.threshold(sample, 127, 255,0)
        sample, contours, hierarchy = cv2.findContours(sample, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_NONE)
        sample = []
        for cnt in contours:
            sample += cnt.tolist()
        sample = np.array(sample)
        LMG.append(sample)
    img, contours, hierarchy = cv2.findContours(img, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_NONE)
    img = []
    for cnt in contours:
        img += cnt.tolist()
    img = np.array(img)

    p_ret = cv2.matchShapes(img,LMG[0],1,0.0)
    for i in range(1,len(LMG)):
        ret = cv2.matchShapes(img,LMG[i],1,0.0)
        if ret < p_ret:
            p_ret = ret
    return p_ret
Example #9
def fix_target_perspective(contour, bin_shape):
    """
    Fixes the perspective so it always looks as if we are viewing it head-on
    :param contour:
    :param bin_shape: numpy shape of the binary image matrix
    :return: a new version of contour with corrected perspective, and a new binary image to test against
    """
    before_warp = np.zeros(bin_shape, np.uint8)
    cv2.drawContours(before_warp, [contour], -1, 255, -1)

    try:
        corners = get_corners(contour)

        # get a perspective transformation so that the target is warped as if it was viewed head on
        shape = (400, 280)
        dest_corners = np.array([(0, 0), (shape[0], 0), (0, shape[1]), (shape[0], shape[1])], np.float32)
        warp = cv2.getPerspectiveTransform(corners, dest_corners)
        fixed_perspective = cv2.warpPerspective(before_warp, warp, shape)
        fixed_perspective = fixed_perspective.astype(np.uint8)

        if int(cv2.__version__.split('.')[0]) >= 3:
            _, contours, _ = cv2.findContours(fixed_perspective, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        else:
            contours, _ = cv2.findContours(fixed_perspective, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        new_contour = contours[0]

        return new_contour, fixed_perspective

    except ValueError:
        raise ValueError('Failed to detect rectangle')
Example #10
def threshold_for_contours(img_from_array):
    hsvimg = cv2.cvtColor(img_from_array, cv2.COLOR_BGR2HSV)

    #print type(hsvimg)
    lower_blue = np.array([110, 50, 50], np.uint8)
    #estimated values until testing
    upper_blue = np.array([140, 255, 255], np.uint8)
    #estimated values until testing

    #threshold_for_contours boundaries to only get orange colors
    lower_orange = np.array([5, 100, 100], np.uint8)
    #estimated values until testing
    upper_orange = np.array([27, 255, 255], np.uint8)
    #estimated values until testing

    #here's where the thresholding actually happens,
    #using the boundaries to get the specified colors
    #print 'Type:', hsvimg.dtype, 'Shape:', hsvimg.shape
    blue = cv2.inRange(hsvimg, lower_blue, upper_blue)
    orange = cv2.inRange(hsvimg, lower_orange, upper_orange)
    #blue and orange are binary thresholded images:
    #if the image initially had a blue piece, blue will
    #contain values of 0 and 255 while orange will be all
    #0's, and vice versa
    contours_blue, hierarchy_blue = cv2.findContours(blue, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours_orange, hierarchy_orange = cv2.findContours(orange, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    #for some reason contours only has 1 element in it at the most
    return contours_blue, contours_orange
Example #11
    def __grabAtoms(self, image):
        from scipy.spatial import ConvexHull

        segImg = self.segmenter.segment(image)
        contours, _ = cv2.findContours(segImg.copy(),
                                        cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_NONE)

        for cnt in contours:
            M = cv2.moments(cnt)
            if M['m00'] > 0.0:
                c = np.squeeze(cnt)
                cv2.fillConvexPoly(segImg, c[ConvexHull(c).vertices], 255)
            else:
                cv2.fillConvexPoly(segImg, cnt, 0)

        contours, _ = cv2.findContours(segImg.copy(),
                                        cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_NONE)

        conts = []
        centers = []
        for cnt in contours:
            M = cv2.moments(cnt)
            if M['m00'] > 0.0:
                centers.append(np.array((int(M['m10']/M['m00']), int(M['m01']/M['m00']))))
                conts.append(cnt)

        self.segImg = segImg
        self.points = np.array(centers)
        self.contours = np.array(conts)
        return self.points
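As an aside, the scipy ConvexHull step above can also be written with OpenCV's own cv2.convexHull; a rough equivalent of the fill, assuming the same segImg and cnt variables (a sketch, not the example's code):

M = cv2.moments(cnt)
if M['m00'] > 0.0:
    # convexHull returns an (N, 1, 2) point array, which fillConvexPoly accepts directly
    cv2.fillConvexPoly(segImg, cv2.convexHull(cnt), 255)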
Example #12
def shape_match():

    img1 = cv2.imread('l1.png', 0)
    img2 = cv2.imread('l2.png', 0)

    ret, thresh1 = cv2.threshold(img1, 13, 255, 1)
    ret, thresh2 = cv2.threshold(img2, 13, 255, 1)

    im1,contours1,hierarchy = cv2.findContours(thresh1, 2, 1)
    im2,contours2,hierarchy = cv2.findContours(thresh2, 2, 1)
    print len(contours1), len(contours2)
    cv2.drawContours(img1,contours1, -1, (200,0,0))
    cv2.drawContours(img2,contours2, -1, (200,0,0))
    cv2.imshow("1",img1)
    cv2.imshow("2",img2)
    cv2.waitKey()
    query_cnt = get_correct_cnt(contours2, img2)
    if query_cnt is None:
        print 'parse img failed'
        return

    height, width  = img1.shape
    area = height * width
    min_area = area / 25
    max_area = area / 5

    for cnt in contours1:
        print cv2.boundingRect(cnt)
        letter_area = get_cnt_area(cnt)
        if not (min_area < letter_area and letter_area < max_area):
            continue

        print cv2.matchShapes(cnt, query_cnt, 1, 0.0)
Example #13
def getPerfectContours(img):
    """
    Contour area threshold for noise is the most important thing here
    """
    contAreaThreshold = 50

    # Blur the input image to smoothen out the discontinuities
    img_blur = cv2.GaussianBlur(img, (5, 5), 0)

    # Find the contours for blurred image
    ret_img, contours, hierarchy = cv2.findContours(img_blur, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # Only keep the contours with large areas, i.e. noiseless contours
    contours = [x for x in contours if cv2.contourArea(x) > contAreaThreshold]

    # Generate a mask that covers only the contour area
    mask = np.zeros(shape=img.shape, dtype=img.dtype)
    cv2.drawContours(mask, contours, -1, 255, cv2.FILLED)

    # Apply the mask to the original image
    img = cv2.bitwise_and(img, img, mask=mask)

    # Find the contour again
    ret_img, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # Draw the contours
    cv2.drawContours(img, contours, -1, 255, 1)

    return img
Example #14
def filter_words(img):
    orig = img.copy()
    cv2.Laplacian(img, 0, img, 1)

    kernel = np.ones((3, 5), np.uint16)
    img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)

    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    img_ret, contours, hierarchy = cv2.findContours(img.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    (h, w) = img.shape
    new_value = 100
    mask = np.zeros((h + 2, w + 2, 1), np.uint8)
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        seed_point = (x + 3 * w // 4, y + h // 2)  # integer coordinates (floodFill needs ints on Python 3)
        cv2.floodFill(img, mask, seed_point, new_value)

    cv2.threshold(img, 30, 255, cv2.THRESH_BINARY, img)

    img_ret, post_contours, hierarchy = cv2.findContours(img.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    img = draw_contours(img, post_contours, 100)
    orig = draw_contours(orig, post_contours, (0, 0, 255))

    return img, orig
def save_Hu_moments(thresholded, EoLobjectID, contour_dir, filename):
    '''Save to file the 7 Hu moments for each contour in each image for statistical analysis (e.g. for analysis to predict
    which are butterfly shaped). Also save images of each contour so we can look through and mark by hand which are the
    butterfly outlines'''
    if not hasattr(save_Hu_moments, "writefile"):
        save_Hu_moments.writefile = open(os.path.join(contour_dir,filename), 'w')  # it doesn't exist yet, so initialize it
        save_Hu_moments.writefile.write("img-contour	crude.points	simp.crude.points	smooth.points	simp.smooth.points	area	hu1	hu2	hu3	hu4	hu5	hu6	hu7\n")

    crude_contours = cv2.findContours(thresholded.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[0]
    smooth_contours = cv2.findContours(thresholded.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_TC89_KCOS)[0]
    if len(crude_contours) != len(smooth_contours):
        print("oops - the two contour methods should give the same number of contour areas")
        exit()

    for i in range(len(smooth_contours)):
        if cv2.contourArea(smooth_contours[i]) > 0.001*thresholded.shape[0]*thresholded.shape[1]: #only pick areas > 0.1% of the area
            roi = np.asarray(cv2.boundingRect(smooth_contours[i]))
            img = np.zeros((roi[3], roi[2],3), np.uint8)
            cv2.drawContours(img, [smooth_contours[i]-roi[0:2]], 0, (255,255,255), 2)
            cv2.imwrite(os.path.join(contour_dir, "{}-{}.jpg".format(EoLobjectID, i)), img)
#            np.save(os.path.join(contour_dir, "{}-{}.npy".format(EoLobjectID, i)), smooth_contours[i]) #save the contour coordinates
            
            Hu_text = "\t".join(np.char.mod('%e', cv2.HuMoments(cv2.moments(smooth_contours[i]))).flatten())
            contour_lengths = "\t{}\t{}\t{}\t{}".format(len(crude_contours[i]), len(cv2.approxPolyDP(crude_contours[i],1,True)), len(smooth_contours[i]), len(cv2.approxPolyDP(smooth_contours[i],1,True)))
            save_Hu_moments.writefile.write("{}-{}\t{}\t{}\t{}\n".format(EoLobjectID ,i, contour_lengths, cv2.contourArea(smooth_contours[i]), Hu_text))
            save_Hu_moments.writefile.flush()
Example #16
 def find(image1, image2):
     rect = (0, 0, 0, 0)
     firstFrame = image1.decode('base64', 'strict')
     firstFrame = cv2.imdecode(np.fromstring(firstFrame, dtype=np.uint8), -1)
     img = image2.decode('base64', 'strict')
     img = cv2.imdecode(np.fromstring(img, dtype=np.uint8), -1)
     if firstFrame is not None and img is not None:
         firstGray = cv2.cvtColor(firstFrame, cv2.COLOR_BGR2GRAY)
         firstGray = cv2.equalizeHist(firstGray)
         gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
         gray = cv2.equalizeHist(gray)
         frameDelta = cv2.absdiff(firstGray, gray)
         thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
         thresh = cv2.dilate(thresh, None, iterations=2)
         if platform.system() == 'Windows':
             _, cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
         else:
             cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
         contourMax = None
         areaMax = None
         for c in cnts:
             contour = cv2.contourArea(c)
             if contour < 500:
                 continue
             if contourMax is None or contour > contourMax:
                 contourMax = contour
                 areaMax = c
         if not areaMax is None:
             (x, y, w, h) = cv2.boundingRect(areaMax)
             rect = (x, y, w, h)
     return rect
Example #17
    def ImageSegmentation(self):

        kernel = np.array(self.coords_cell, np.int32)
        circle = np.zeros(self.image.shape[:2], np.uint8)

        # link with polylines the coordinates of "left click", thickness could be adjusted,
        # Could also fill inside the polyline
        cv2.polylines(circle,[kernel],False,(255,0,0), thickness=5)
        kernel2 = np.array(self.coords_cell, np.int32)
        circle2 = np.zeros(self.image.shape[:2], np.uint8)
        cv2.polylines(circle2,[kernel2],False,(255,0,0), thickness=4)

        # Segmentation of the protein accumulation using watershed
        self.segmentation = morphology.watershed(self.clean_image, self.markers, mask = circle)
        self.segmentation[self.segmentation < 1.5] = 0
        self.segmentation = self.segmentation.astype('uint8')

        # Find contour of the segmented area
        contours,hierarchy = cv2.findContours(self.segmentation, 1, 2)

        # Find contour of the masked area
        contours_circle,hierarchy = cv2.findContours(circle2, 1, 2)
        self.area = [cv2.contourArea(cnt) for cnt in contours if (cv2.contourArea(cnt))!=0.0]

        self.area = sum(self.area)
        self.area_mask = [cv2.contourArea(cnt_cell) for cnt_cell in contours_circle]
        self.area_mask = sum(self.area_mask)

        if self.area > 0:
            self.surface_segmented.append(self.area)
        if self.area_mask > 0:
            self.surface_masked.append(self.area_mask)
Example #18
def test_area():
    """
    Tests if the contour being drawn on the picture has the same area as the one
    used to initialize the class.
    """
    shape = (100, 100, 3)
    image = np.zeros(shape, dtype=np.uint8)
    # Initialized with a box
    b = bounding_region.BoundingRegion(shape, box=np.array([10, 20, 15, 45]))
    b.draw_box(image, (255, 255, 255), thickness=-1)
    imgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    contours, hierarchy = cv2.findContours(imgray, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_SIMPLE)
    assert(len(contours) == 1)
    area = cv2.contourArea(contours[0])
    assert(area == b.get_area_pixels())

    # Initialized with a contour
    image = np.zeros(shape, dtype=np.uint8)
    b = bounding_region.BoundingRegion(shape, contour=np.array([[[10, 20]], [[25, 20]], [[25, 65]], [[10, 65]]]))
    b.draw_box(image, (255, 255, 255), thickness=-1)
    imgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    contours, hierarchy = cv2.findContours(imgray, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_SIMPLE)
    assert(len(contours) == 1)
    area = cv2.contourArea(contours[0])
    assert(area == b.get_area_pixels())
Example #19
 def detectRover(self, argFrame):
     frame    = self.frame
     hsvFrame = self.frame
     thresh   = self.frame[:,:,0]
     rGreen = (38,67,155,198,0,255)
     rPink = (165,182,155,192,0,255)
     hsvFrame  = cv2.cvtColor(self.frame.copy(), cv2.COLOR_BGR2HSV)
     thresh = cv2.inRange(hsvFrame.copy(),np.array([rGreen[0],rGreen[2],rGreen[4]]),np.array([rGreen[1],rGreen[3],rGreen[5]]))
     thresh = cv2.medianBlur(thresh.copy(),5)
     thresh = cv2.erode(thresh.copy(), erodeElem)
     #thresh = cv2.erode(thresh.copy(), erodeElem)
     thresh = cv2.dilate(thresh.copy(), dilateElem)
     thresh = cv2.dilate(thresh.copy(), dilateElem)
     _, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
     if len(contours) != 1:
         return -1
     (x,y,w,h) = cv2.boundingRect(contours[0])
     greenPt = (int((x+x+w)/2),int((y+y+h)/2))
     thresh = cv2.inRange(hsvFrame.copy(),np.array([rPink[0],rPink[2],rPink[4]]),np.array([rPink[1],rPink[3],rPink[5]]))
     thresh = cv2.medianBlur(thresh.copy(),5)
     thresh = cv2.erode(thresh.copy(), erodeElem)
     #thresh = cv2.erode(thresh.copy(), erodeElem)
     thresh = cv2.dilate(thresh.copy(), dilateElem)
     thresh = cv2.dilate(thresh.copy(), dilateElem)
     _, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
     if len(contours) != 1:
         return -1
     (x,y,w,h) = cv2.boundingRect(contours[0])
     pinkPt = (int((x+x+w)/2),int((y+y+h)/2))
     self.roverPos = (int((greenPt[0]+pinkPt[0])/2),int((greenPt[1]+pinkPt[1])/2))
     angle = getAngle(pinkPt[0],pinkPt[1],greenPt[0],greenPt[1])
     self.roverHeading = 360+angle[2]*-1
     return greenPt, pinkPt
Example #20
def detect_count(img_bw):
    BWt = img_bw.copy()
    cntrs, hircy = cv2.findContours(BWt,
                                    cv2.RETR_EXTERNAL,    # cv2.RETR_TREE,
                                    cv2.CHAIN_APPROX_SIMPLE)
    F = np.zeros(img_bw.shape, dtype=np.uint8)
    MASK = np.zeros(img_bw.shape, dtype=np.uint8)
    areas = [cv2.contourArea(cnt) for cnt in cntrs]
    t = np.mean([a for a in areas if a > 50])  # list comprehension so this also works on Python 3
    t = t * 0.9
    MASKrect = None
    MASKcnt = None
    wasMasked = False
    for idx, cnt in enumerate(cntrs):
        if (areas[idx] > t):
            # thickness -1 will fill the contours
            cv2.drawContours(F, [cnt], 0, (255), thickness=-1)
            if (not wasMasked):
                wasMasked = True
                cv2.drawContours(MASK, [cnt], 0, (255), thickness=-1)
                MASKcnt = cnt
                MASKrect = cv2.boundingRect(cnt)

    # cv2.imshow('Count', F)
    # cv2.imshow('Mask', self.MASK)
    cntrs, hircy = cv2.findContours(F,
                                    cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)

    amount = len(cntrs)
    return amount, MASK, MASKrect, MASKcnt
Example #21
def ostacoli(frame):
    
    gray_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    edges = cv2.Canny(gray_image,59,76, apertureSize = 3)
    #edges = cv2.GaussianBlur(edges,(5,5),5)
    
    kernel = np.ones((14,14),np.uint8)
    #edges = cv2.erode(edges,kernel,iterations = th1)
    #edges = cv2.dilate(edges,kernel,iterations = th1)       
    #edges = cv2.erode(edges,kernel,iterations = th2)
    
    
    edges = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)
    
    edges = cv2.erode(edges,kernel,iterations = 2)
    
    #cp = copy.deepcopy(edges)
    
    cp1 = edges[r1[2]:r1[3],r1[0]:r1[1]]
    
    cp2 = edges[r2[2]:r2[3],r2[0]:r2[1]]
    
    contours1,hierarchy = cv2.findContours(cp1, 1, 2)
    contours2,hierarchy = cv2.findContours(cp2, 1, 2)
    
    contours1 = filtra_ostacoli(contours1)
    contours2 = filtra_ostacoli(contours2)
    
    return (contours1, contours2)
Example #22
def getContoursRectImage(img):
	result_img=[]
	result_hu=[]
	width=int(32*len(img[0])/len(img))
	img = cv2.resize(img, (width, 32), interpolation=cv2.INTER_AREA)
	img_gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) # grayscale
	img_smooth=cv2.GaussianBlur(img_gray, (1,9), 0) # vertical Gaussian smoothing, so digits like 0 and 7 stay connected top to bottom
	# if the mean gray level is above maxMinAvarge the background is bright, so invert so the background becomes black
	if(np.mean(img_smooth)>maxMinAvarge(img_smooth)):
		ret,img_binary=cv2.threshold(img_smooth,maxMinAvarge(img_smooth),255,cv2.THRESH_BINARY_INV)
	else:
		ret,img_binary=cv2.threshold(img_smooth,maxMinAvarge(img_smooth),255,cv2.THRESH_BINARY)
	image, contours, hierarchy = cv2.findContours(img_binary.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
	#img=cv2.drawContours(img,contours,-1,(255,0,0),3)
	for ctr in contours:
		rect=cv2.boundingRect(ctr)
		cv2.rectangle(img, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), (0, 255, 0), 1)
		roi = img_binary[rect[1]:rect[3]+rect[1],rect[0]:rect[2]+rect[0]]
		width=int(32*rect[2]/rect[3])
		roi = cv2.resize(roi, (width, 32), interpolation=cv2.INTER_AREA)
		kernel = np.ones((3,3),np.uint8)
		roi = cv2.erode(roi,kernel,iterations = 1)
		image, roi_contours, hierarchy = cv2.findContours(roi.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE) # assumed: the resized ROI was meant here, not img_binary again, and renamed so the loop's contours list is not clobbered
		m=cv2.moments(roi)
		hm=cv2.HuMoments(m)
		result_hu.append(hm)
		result_img.append(roi)
	return [result_img,result_hu,img_gray,img_binary,img]
Example #23
def findherb(herb_object):
    #takes bank screenshot
    bank_screenshot, bankx, banky = RS.getBankWindow('hsv')

    # finds all grimmys first
    low, high = herb_object.herbdic['grimmy']
    low = np.array(low)
    high = np.array(high)
    mask = cv2.inRange(bank_screenshot, low, high)

    # kernel size, in pixels, used to remove noise
    kernel = np.ones((5,5), np.uint8)

    # removes noise
    #erosion = cv2.erode(mask, kernel, iterations = 1)

    # increases white 
    dilation = cv2.dilate(mask, kernel, iterations = 1)

    _, contours, _ = cv2.findContours(dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # fills in the contours in the mask with a rect
    for con in contours:
        x, y, w, h = cv2.boundingRect(con)
        cv2.rectangle(mask,(x,y),(x+w,y+h),(255,255,255),-1)
    # result of finding only grimmys in the hsv image
    res = cv2.bitwise_and(bank_screenshot,bank_screenshot, mask = mask.copy())
    # finding the passed herb here based on color range 
    low, high = herb_object.hsv_range
    low = np.array(low)
    high = np.array(high)
    herb_mask = cv2.inRange(res, low, high)
    ###########
    #debug line
    #cv2.imshow('debug.png', herb_mask)
    #cv2.waitKey(0)
    #return
    ###########

    _, contours, _ = cv2.findContours(herb_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contour_areas = {}
    # finds the center of the herb
    x = y = 1  # assumed sentinel initialization (missing in the snippet); the y == 1 check below means 'not found'
    for con in contours:
        M = cv2.moments(con)
        #print(M)
        # gets center of object
        x,y = int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])
        break
    if y == 1:
        return 
    # makes coords relative to the window
    x += RSX + bankx
    y += RSY + banky
    # creates a list of pixel offsets from -15 to 14
    pixels = [i for i in range(-15,15)]
    # randomly adds value from pixels list
    x += random.choice(pixels)
    y += random.choice(pixels)

    # returns coords to right click and get options
    return x, y 
Example #24
def getContoursRectImage(img):
	result_img=[]
	result_hu=[]
	image, contours, hierarchy = cv2.findContours(img.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
	new_img=np.zeros((len(img),len(img[0])), np.uint8)
	new_img=cv2.drawContours(new_img,contours,-1,(255,255,255),1)
	image, contours, hierarchy = cv2.findContours(new_img.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
	for ctr in contours:
		rect=cv2.boundingRect(ctr)
		roi = new_img[rect[1]:rect[3]+rect[1],rect[0]:rect[2]+rect[0]]
		width=int(32*rect[2]/rect[3])
		roi = cv2.resize(roi, (width, 32), interpolation=cv2.INTER_AREA)
		#blank_image = np.zeros((32+2,width+2), np.uint8)
		#blank_image[1:32+1,1:width+1]=roi
		blank_image = np.zeros((32+2,40), np.uint8)
		# center the ROI on a 40-px-wide canvas; clamp first so the slice always fits
		# (assumed fix for the original's negative index when tmpx == 0)
		if width > 40:
			roi = cv2.resize(roi, (40, 32), interpolation=cv2.INTER_AREA)
			width = 40
		tmpx = int((40 - width) / 2)
		blank_image[1:32+1, tmpx:tmpx+width] = roi
		#blank_image=cv2.Canny(blank_image,100,200)
		image, contours1, hierarchy = cv2.findContours(blank_image.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
		m=cv2.moments(ctr)
		hm=cv2.HuMoments(m)
		result_hu.append(hm)
		result_img.append(blank_image)
	return [result_img,result_hu]
Example #25
    def process(self, annotator, iohelper):
        img = cv2.imread(iohelper.thumbnail())
        image = img.copy()

        # Preprocessing like gray-scaling, thresholding, closing
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray_blur = cv2.GaussianBlur(gray, (9, 9), 0)
        thresh = cv2.adaptiveThreshold(gray_blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 31, 11)
        kernel = np.ones((3, 3), np.uint8)
        closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=2)

        # Contour detection
        cont_img = closing.copy()
        if int(cv2.__version__[0]) < 3:
            contours, _ = cv2.findContours(\
                    cont_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        else:
            _, contours, _ = cv2.findContours(\
                    cont_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        # Draw rectangles and add found bug to annotator.
        i = 0
        for cnt in contours:
            area = cv2.contourArea(cnt)
            ellipse = cv2.fitEllipse(cnt)
            center, axes, angle = ellipse
            rect_area = axes[0] * axes[1]
            if hasattr(cv2, 'boxPoints'):  # OpenCV 3+
                rect = np.round(cv2.boxPoints(ellipse)).astype(np.int64)
            else:  # OpenCV 2.x
                rect = np.round(np.float64(cv2.cv.BoxPoints(ellipse))).astype(np.int64)
            annotator.add_bug(*rect)
            color = (255,0,0)
            cv2.drawContours(image, [rect], 0, color, 1)
            i = (i + 30) % 255

        return image
Example #26
def draw_bounding(img_url, n):
    """Given input image, draw bounding rectangles on top of
    first n contours"""
    img = cv2.imread(img_url)    
    original = cv2.imread(img_url)

    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, img = cv2.threshold(img, 127, 255, 1)
    plt.gray()

    contours, hier = cv2.findContours(np.array(img), 
                                      cv2.RETR_EXTERNAL, 
                                      cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) == 1: 
        print cv2.boundingRect(contours[0])
        cv2.drawContours(img, [contours[0]], 0, 0, 100) 

    contours, hier = cv2.findContours(np.array(img), 
                                      cv2.RETR_EXTERNAL, 
                                      cv2.CHAIN_APPROX_SIMPLE)
    #sort contours by maximum area
    contours = sorted(contours, key=lambda cnt:cv2.contourArea(cnt), reverse=True)
     
    for cnt in contours[:n]:
        x,y,w,h = cv2.boundingRect(cnt)
        cv2.rectangle(original, (x,y), (x+w,y+h), (0,255,0),2)

    plt.imshow(original)
    plt.show()
def detectMov(img):
    ret,thresh = cv2.threshold(img,127,255,0)
    
    if opencvver[0] == '3' and opencvver[1] == '1':
        #to run on OpenCV 3.1
        im2,contours,hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    else:
        #to run on OpenCV 2.4
        contours,hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) < 1:
        return []
    #[(0,0,1,1)],[SuperRect((0,0,1,1),-1)]
    listc = []
    for cnt in  contours:
        x,y,w,h=cv2.boundingRect(cnt)
        px = w/2.5
        py = h/2.5
        x =  int(x - px)
        y =  int(y - py)
        w += int(2*px)
        h += int(2*py)
        if w*h>25:
            listc.append((x,y,w,h))
    lists = aglutina(listc)
    listc2 = []
    for sr in lists:
        listc2.append(sr.getRect())
    lists2 = aglutina(listc2)
    #restriction area for the rectangles
    (hr, wr) = img.shape[:2]
    rectpts = []
    for reci in lists2:
        rectpts.append(reci.getAjRect(wr,hr))

    return rectpts
Example #28
def find_prayer_pot():
    rs_bag, bagx, bagy = RS.get_bag('bag coords', 'hsv')
    # prayer potion color ranges
    low = np.array([78,140,0])
    high= np.array([81,225,211])
    mask = cv2.inRange(rs_bag, low, high)

    kernel = np.ones((5,5), np.uint8)
    dilation = cv2.dilate(mask, kernel, iterations = 1)
    
    _,contours, _ = cv2.findContours(dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # assumed: search the dilated mask, which was otherwise unused

    for con in contours:
        x, y, w, h = cv2.boundingRect(con)
        cv2.rectangle(mask,(x,y), (x+w, y+h), (255,255,255),-1)

    _,contours, _ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for con in contours[::-1]:
        M = cv2.moments(con)
        mx, my = int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])
        mx += RSX + bagx
        my += RSY + bagy

        mx += random.randint(-7,7)
        my += random.randint(-12,5)

        Mouse.moveClick(mx,my,1)
        #Mouse.moveTo(mx,my)
        break
Example #29
def get_contour_sample(img_orig, sample_interval=5):
    """
    Return contour sample points of a image
    """
    img = img_orig.copy()
    img[img > 10] = 255
    element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (31, 31))
    mask = pad_image_cnt(img, None, (0, 0), (300, 300), bg=(1500,1500))
#    print mask
    for j in range(99):
        mask = cv2.dilate(mask, element)
#        cv2.imshow("mask_dilate", mask)
#        cv2.waitKey()
        cnts, hier = cv2.findContours(mask.copy(), cv2.cv.CV_RETR_EXTERNAL, 
                                      cv2.cv.CV_CHAIN_APPROX_TC89_L1)
        if len(cnts) == 1:
            break
#    cv2.imshow("mask_dilate", mask)
#    cv2.waitKey()
    
    mask = cv2.erode(mask, element, iterations=j+1)
#    cv2.imshow("mask", mask)
#    cv2.waitKey()
#    cv2.destroyWindow("mask")
    cnts, hier = cv2.findContours(mask.copy(), cv2.cv.CV_RETR_EXTERNAL, 
                                  cv2.cv.CV_CHAIN_APPROX_NONE)
    img_contour_pts = np.squeeze(np.vstack(cnts))
    img_contour_sample = img_contour_pts[range(0, img_contour_pts.shape[0], sample_interval), :]
    img_contour_sample = img_contour_sample - np.matlib.repmat((300,300),
                                                               img_contour_sample.shape[0], 1)
#    draw_contours(img_contour_sample, (800,800), show=True)
    return img_contour_sample
Example #30
def cutTaxPayer(img):
    new_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    new_img = cv2.adaptiveThreshold(new_img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 9 , 9)
    contours0, hierarchy = cv2.findContours(new_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    H, W = img.shape[:2]
    result = np.zeros((H, W), np.uint8)
    for i in range(len(contours0) - 1, -1, -1):
        cnt = contours0[i]
        x, y, w, h = cv2.boundingRect(cnt)
        if w < 10 or h < 10:
            contours0.pop(i)
        else :
            cv2.rectangle(result, (x, y), (x + w, y + h), (255), 1)
            
    for h in range(H):
        for w in range(W):
            if  result[h][w] == 255:
                cv2.line(result, (0, h), (W, h), (255))
                break
    contours1, hierarchy1 = cv2.findContours(result, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # for cnt in contours1:
    #     x, y, w, h = cv2.boundingRect(cnt)  
    #     cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 1)
    #     print x, y
    # cv2.imshow("xxx1", img)
    if len(contours1) < 2:
        return np.ones((H // 4, W * 3 // 4, 3), np.uint8) * 255
    x, y, w, h = cv2.boundingRect(contours1[-2])
    center = (x + w // 2, y + h // 2)
    img = cv2.getRectSubPix(img, (w, h), center)
    return img
while cap.isOpened():

    if (ret1 == True):

        #finding the difference between each frame.
        diff = cv2.absdiff(frame1, frame2)

        #converting the frame to gray scale.
        im_gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(im_gray, (5, 5), 0)
        _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
        #it helps to remove useless information and background disturbance.
        dilate = cv2.dilate(thresh, None, iterations=5)

        #finding the contours.
        contour, _ = cv2.findContours(dilate, cv2.RETR_TREE,
                                      cv2.CHAIN_APPROX_SIMPLE)

        for cnt in contour:

            #finding the coordinates of each contour found.
            x, y, w, h = cv2.boundingRect(cnt)

            #trying to find the car in each frame by finding the area of each contour.
            if (cv2.contourArea(cnt) >= 19000
                    and cv2.contourArea(cnt) <= 24000):

                frame_gray = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)

                #Finding the edges using canny edge detection.
                canny = cv2.Canny(frame_gray, 170, 255)
Example #32
import numpy as np
import cv2

img = cv2.imread('trapezium.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

ret,thresh = cv2.threshold(gray,127,255,1)

_,contours,h = cv2.findContours(thresh,1,2)

for cnt in contours:
    M = cv2.moments(cnt)
    cx = int(M['m10']/M['m00'])
    cy = int(M['m01']/M['m00'])

    print cx,",",cy
    
    centroid = "("+str(cx) + "," + str(cy)+")"
    cv2.putText(img, centroid, (cx, cy), cv2.FONT_HERSHEY_SIMPLEX, 0.5, 0, 1)
    cv2.putText(img, "centroid", (cx+20, cy+15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, 0, 1)


cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
table = __extract_region__("/home/ggdhines/eastwind-wag-279-1946_0523-0.JPG",
                           region=(127, 1467, 275, 1151))

pca_image, threshold, inverted = __pca__(table,
                                         __binary_threshold_curry__(175))
print(threshold)
mask = __create_mask__(table)

masked_image = __mask_lines__(pca_image, mask)

kernel = np.ones((5, 5), np.uint8)
# masked_image = 255 - masked_image
# closing = cv2.morphologyEx(masked_image, cv2.MORPH_CLOSE, kernel)
# closing = 255 - closing

im2, contours, hierarchy = cv2.findContours(masked_image.copy(), cv2.RETR_TREE,
                                            cv2.CHAIN_APPROX_SIMPLE)

for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    perimeter = cv2.arcLength(cnt, True)

    if (w / h > 5) or (h / w > 5) or (perimeter < 20):
        cv2.drawContours(masked_image, [cnt], 0, 255, -1)
    else:

        print(perimeter)

plt.imshow(masked_image, cmap="gray")
plt.show()
#     perimeter = cv2.arcLength(cnt, True)
#
Example #34
    upper_blue = np.array([130,255,255])
    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    res = cv2.bitwise_and(frame,frame, mask= mask)
    return res
   
  while(1):
    
    img1 = musky(cv2.GaussianBlur(frame1,(55,51),0))
    img2 = musky(cv2.GaussianBlur(frame2,(55,51),0))
    diff = cv2.absdiff(img1,img2)
    img3 = cv2.cvtColor(diff,cv2.COLOR_BGR2GRAY)
    kernel = np.ones((480,640),np.uint8)
    erosion = cv2.erode(img3, kernel, iterations = 5)   # assumed input img3; the argument was missing in the snippet
    dilation = cv2.dilate(erosion, kernel, iterations = 5)  # assumed: dilate the eroded image ('mask' is not in scope here)
    image, contours, hierarchy = cv2.findContours(img3,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    
    cv2.drawContours(frame1, contours, -1, (0,0,255), 3)
    l = len(contours)
    print (l)


    
    if l > 0:
        for i in range(l):
            area = cv2.contourArea(contours[i])
            if i == 0:
                Max = area
                k = 0
            elif area > Max:
                Max = area  # track the running maximum so k ends up at the largest contour
                k = i
Example #35
def theProcess(bounds, thisWord, words):
	print "STARTED: "+thisWord
	first = None
	content = None
	lastCount = 0
	hwindex = 0
	flag = False
	normalArea = -1
	avgNormalArea = [0, 0]
	flip = False
	lastTime = -1
	pressed = False

	while True:
		frameArr = frames.get()
		s, f = frameArr[0], frameArr[1]
		frames.put(frameArr)
		if f is None:
			continue

		if "-s" in argv:
			cv2.imshow("frame", f)
		if s:
			f = f[bounds[0]:bounds[1], bounds[2]:bounds[3]]
			gray = cv2.cvtColor(f, cv2.cv.CV_BGR2GRAY)
			if first is None:
				first = gray
				content = gray
			else:
				diff = cv2.absdiff(first, gray)
				_, diff = cv2.threshold(diff,20,255,cv2.THRESH_BINARY)
				(cnts, _) = cv2.findContours(diff.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
				toDel = []
				totalArea = 0
				for c in xrange(len(cnts)):
					currentArea = cv2.contourArea(cnts[c]) 
					if currentArea<20:
						toDel.append(c)
					totalArea+=currentArea
				for i in xrange(len(toDel)):
					del cnts[toDel[i]-i]
				#print "NUM CONTS: ",len(cnts)
				if "-s" in argv:
					cv2.imshow(thisWord+"diff", diff)
				#white=np.sum(diff)
				if (lastCount>2*len(cnts) and len(cnts)==0):
					flag = True
					#print hwindex, "Hello world!"
				#print "DIFFERENCE: ",abs(white-lastCount)
				lastCount = len(cnts)
				first = gray
			thresh = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,101,20)
	
			#thresh = cv2.erode(thresh, None)
			thresh = cv2.dilate(thresh, None)
			if(avgNormalArea[1]<5):
				avgNormalArea[0]+=np.sum(thresh)
				avgNormalArea[1]+=1
				if(avgNormalArea[1]==5):
					normalArea = avgNormalArea[0]/avgNormalArea[1]
					print normalArea
			elif flag:
				if (abs(np.sum(thresh)-normalArea)>100000):
					if lastTime==-1:
						lastTime = time()
						#print thisWord
						words.put((thisWord+"_down", time()))
						pressed = True
					if((time()-lastTime)>=0.5):
						lastTime = time()
						#print thisWord
						#words.put((thisWord, time()))
						hwindex+=1
						#print hwindex
						#print np.sum(thresh)
						#print normalArea
					#else:
					#	print "TIME: ", time()-lastTime
					#flag = False
				else:
					flag = False
					if(pressed):
						words.put((thisWord+"_up", time()))
						pressed = False
					if "-s" in argv:
						cv2.imshow(thisWord+"Failure", thresh)
					#print "Failed!", abs(np.sum(thresh)-normalArea), "\n\t", np.sum(thresh), " ", normalArea
			#elif flip:
			#	flag = False
			#	normalArea = normalArea+np.sum(thresh)
			#	normalArea /= 2
			else:
				if(pressed):
					words.put((thisWord+"_up", time()))
					pressed = False
				flag = False
	
			if(not flag):
				lastTime = -1
				#print "RESET"
				#print "Adjusted to: ", normalArea
			flip = not flip
			#thresh = cv2.dilate(thresh, None)
			(cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
	
			#print len(cnts)
			cv2.drawContours(f, cnts, -1, 10, -1)
			if "-s" in argv:
				cv2.imshow(thisWord+"f", f)
			if "-s" in argv:
				cv2.imshow(thisWord+"t", thresh)
			cv2.waitKey(1)
Example #36
def motion_detection():
    start = time.time()
    # Assigning our static_back to None
    static_back = None
    # Infinite while loop to treat stack of image as video
    while True:
        # Reading frame(image) from video
        check, frame = video.read()

        # Converting color image to gray_scale image
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Blurring the grayscale image with a Gaussian
        # so that changes can be found easily
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        # In first iteration we assign the value
        # of static_back to our first frame
        if static_back is None:
            static_back = gray
            continue

        if (abs(start - time.time()) > 2):
            static_back = gray
            start = time.time()

        # Difference between static background
        # and current frame(which is GaussianBlur)
        diff_frame = cv2.absdiff(static_back, gray)

        # If change in between static background and
        # current frame is greater than 30 it will show white color(255)
        thresh_frame = cv2.threshold(diff_frame, 30, 255, cv2.THRESH_BINARY)[1]
        thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)

        # Finding contour of moving object
        (image, cnts, hier) = cv2.findContours(thresh_frame.copy(),
                                               cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_SIMPLE)

        # Used for drawing the found contours
        # cv2.drawContours(frame,cnts,-1,(0, 0, 255))

        for contour in cnts:
            if cv2.contourArea(contour) < 1000:
                continue

            rect = cv2.minAreaRect(contour)
            box = cv2.boxPoints(rect)
            box = numpy.int0(box)
            cv2.drawContours(frame, [box], 0, (0, 0, 255), 2)

        # Displaying the frame
        cv2.putText(frame, 'Motion', (25, 25), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (0, 0, 255), 2)
        cv2.imshow("Detector with states", frame)

        key = cv2.waitKey(1)
        # if q entered whole process will stop
        if key == ord('q'):
            return -1
        # if n entered change state to pedestrian detection
        if key == ord('n'):
            return 1
Example #37
  def imageCallback(self, msg):
    found = False
    try:
      frame = self.bridge.imgmsg_to_cv2(msg, "bgr8")
    except Exception as e:
      print('failed to convert ')
      print(e)
      logging.error(traceback.format_exc())
    #destRGB = cv2.cvtColor(srcBGR, cv2.COLOR_BGR2RGB)
    tCurrImg = msg.header.stamp

    # Setup Image Processing Variables
    redLower1 = (150, 100, 100)
    redUpper1 = (180, 255, 255)
    redLower2 = (0, 100, 100)
    redUpper2 = (20, 255, 255)
    v = (0,0)
    
    # resize the frame, blur it, and convert it to the HSV
    # color space
    frame = imutils.resize(frame, width=600)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    grey = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

    # construct a mask for the color "red", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, redLower1, redUpper1)
    mask = mask | cv2.inRange(hsv, redLower2, redUpper2)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
      cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    center = None
    print('Processed frame, found '+str(len(cnts))+' contours')
    # only proceed if at least one contour was found
    
    if len(cnts) > 0:
      # find the largest contour in the mask, then use
      # it to compute the minimum enclosing circle and
      # centroid
      c = max(cnts, key=cv2.contourArea)
      ((x, y), radius) = cv2.minEnclosingCircle(c)
      M = cv2.moments(c)
      center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
      print('Found balloon '+ str(c.shape))
      inPts = np.transpose(np.array([c[:,0,0],c[:,0,1]],dtype=np.float32))
      found = True
      if(self.tLastImg.to_sec()>0):
        prevGrey = cv2.cvtColor(cv2.GaussianBlur(self.prevFrame,(11,11),0),cv2.COLOR_BGR2GRAY)
        pts, st, err = cv2.calcOpticalFlowPyrLK(prevGrey,grey,np.float32([inPts]),None)#,**lk_params)
        good_old = c[st==1]
        good_new = pts[np.transpose(st)==1]
        s = np.sum(good_new-good_old,axis=0)/len(good_new[:,1]) 
        v = s/(tCurrImg.to_sec() - self.tLastImg.to_sec())#This had better not be dividing by zero...
        print('avg motion is ' + str(s)+'\t and the avg vel is '+str(v))
      # # only proceed if the radius meets a minimum size
      # if radius > 10:
      # 	# draw the circle and centroid on the frame,
      # 	# then update the list of tracked points
      	
      cv2.circle(frame, (int(x), int(y)), int(radius),
        (0, 255, 255), 2)
      cv2.circle(frame, center, 5, (0, 0, 255), -1)

    # save the frame to file
    self.count = self.count + 1
    imName = "image-%04d.jpg" % self.count
    cv2.imwrite(imName,frame)
    key = cv2.waitKey(1) & 0xFF
    self.prevFrame = frame
      
    

    measMsg = Meas()
    if(found and not (math.isnan(v[0]) or math.isnan(v[1]))):
      measMsg.r[0] = center[0] - np.size(frame, 1)/2
      measMsg.r[1] = center[1] - np.size(frame, 0)/2
      measMsg.v[0] = v[0]
      measMsg.v[1] = v[1]
      tCurr = rospy.Time.now() 
      measMsg.tStamp = tCurr.to_sec()
      self.pub.publish(measMsg)

    self.tLastImg = tCurrImg
Example #38
def read_digits(gray, image, debug=False):
    _, gray = cv2.threshold(gray, 100, 255, 0)  # extract white digits
    gray_inv = cv2.bitwise_not(gray)  # turn to black digits

    # to locate digits area
    cnts = cv2.findContours(gray_inv, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    largest_area = sorted(
        cnts,
        key=cv2.contourArea)[-1]  # find LCD = the largest white background
    mask = np.zeros(image.shape, np.uint8)  # all black in mask
    cv2.drawContours(mask, [largest_area], 0, (255, 255, 255),
                     -1)  # make roi area white in mask
    output = cv2.bitwise_and(image, mask)  # pick roi from image
    roi_gray = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)  # white digits
    _, roi_gray = cv2.threshold(roi_gray, 100, 255,
                                0)  # highlight white digits

    # to find each digit
    thresh = cv2.threshold(roi_gray, 0, 255,
                           cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 1))
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    (x, y, w, h) = cv2.boundingRect(cnts[0])
    roi_small = thresh[y:y + h, x:x + w]
    warped = roi_small.copy()  # black digits

    # the displayed numbers lean slightly to the right; warp them upright
    skew = 8

    height, width = warped.shape
    width -= 1
    height -= 1
    rect = np.array([[0, 0], [width, 0], [width, height], [0, height]],
                    dtype="float32")
    dst = np.array(
        [[-skew, 0], [width - skew, 0], [width, height], [0, height]],
        dtype="float32")
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(warped, M, (width + 1, height + 1))
    output = cv2.warpPerspective(output, M, (width + 1, height + 1))

    # segments 2 and 5 are separated, so do a vertical dilate and erode to connect them
    vert_dilate3 = cv2.getStructuringElement(cv2.MORPH_RECT, ksize=(1, 3))
    dilation = cv2.dilate(warped, vert_dilate3)
    dilation = cv2.erode(dilation, vert_dilate3)  # black digits (erode the dilated image, not warped again)
    dilation_inv = cv2.bitwise_not(dilation)  # white digits

    # locate each digit
    cnts = cv2.findContours(dilation_inv.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    digitCnts = []
    # loop over the digit area candidates
    for _c, c in enumerate(cnts):
        (x, y, w, h) = cv2.boundingRect(c)
        if debug:
            print("Contour {}: w={}, h={}, x={}, y={}".format(_c, w, h, x, y))

        if 10 <= h <= 35 and w <= 20 and y < 20:
            digitCnts.append(c)
    # sort the contours from left-to-right, then initialize the actual digits themselves
    # todo avoid error: ValueError: not enough values to unpack (expected 2, got 0)
    digitCnts = contours.sort_contours(digitCnts, method="left-to-right")[0]

    # todo if len(digitCnts)>4, pick up y != all other y, because y should be same to all digits, x increase by w
    # current solution is y < 20
    # find exact number
    digits = []
    point = None
    avg_digit_width = 12
    # loop over each of the digits
    for _c, c in enumerate(digitCnts):
        (x, y, w, h) = cv2.boundingRect(c)
        if debug:
            print("Selected contour {}: w={}, h={}, x={}, y={}".format(
                _c, w, h, x, y))
        # manually override the width of number 1
        if w < 9:
            x -= avg_digit_width - w
            w = avg_digit_width
            if debug:
                print("  changed contour : w={}, h={}, x={}, y={}".format(
                    w, h, x, y))
        elif w > avg_digit_width + 1:
            w = avg_digit_width
            point = _c
        roi = dilation_inv[y:y + h, x:x + w]
        # compute the approximate width and height of each segment based on the ROI dimensions.
        (roiH, roiW) = roi.shape
        (dW, dH) = (int(roiW * 0.34), int(roiH * 0.25))
        dHC = int(roiH * 0.1)
        # print("roiH={}, roiW={}, dH={}, dW={}, dHC={}".format(roiH, roiW, dH, dW, dHC))

        # define the coordinates of set of 7 segments
        segments = [
            ((1, 0), (w, dH)),  # top
            ((1, 0), (dW, h // 2)),  # top-left
            ((w - dW, 0), (w - 2, h // 2)),  # top-right
            ((4, (h // 2) - dHC), ((w // 2) + 1, (h // 2) + dHC)),  # center
            ((0, h // 2), (dW, h)),  # bottom-left
            ((w - dW - 2, h // 2), (w, h)),  # bottom-right
            ((0, h - dH), (w - 2, h))  # bottom
        ]
        on = [0] * len(segments)
        # print("segments={}".format(segments))

        if show_image:
            _show_image("ROI {}".format(_c), roi, False)

        # loop over the segments
        for (i, ((xA, yA), (xB, yB))) in enumerate(segments):
            # extract the segment ROI, count the total number of thresholded pixels in the segment, and then compute
            # the area of the segment
            segROI = roi[yA:yB, xA:xB]
            total = cv2.countNonZero(
                segROI)  # (0, 0, 0)=black && (255, 255, 255)=white
            area = (xB - xA) * (yB - yA)
            # if the total number of non-zero pixels is greater than 50% of the area, mark the segment as "on"
            if debug:
                print(i, total / float(area))
            if total / float(area) > 0.5:
                on[i] = 1

            if show_image:
                _show_image("Segment {} of ROI {}".format(i, _c), segROI)

        # look up the digit and draw it on the image; -1 marks a lookup failure
        if tuple(on) in DIGITS_LOOKUP.keys():
            digit = DIGITS_LOOKUP[tuple(on)]
        else:
            digit = -1
            if debug:
                print(on)
                cv2.imshow("ROI", roi)

        digits.append(digit)
        # deal with decimal point
        if point is not None and '.' not in digits:
            digits.append('.')
        cv2.rectangle(output, (x, y), (x + w, y + h), (0, 255, 0),
                      1)  # (0, 255, 0)=green
        cv2.putText(output, str(digit), (x + 1, y + 25),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 1)

    # insert a decimal point if none was detected
    if point is None and len(digits) >= 4:
        digits.insert(-4, '.')

    # fall back to 0 if any digit failed to parse
    if -1 in digits:
        number = 0
    else:
        number = float(''.join(map(str, digits)))

    # display the digits
    _show_image("Image read digits={}".format(number), image, destroy=False)
    return number
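The function above depends on a DIGITS_LOOKUP table defined elsewhere in the example's module. A minimal sketch of such a table, assuming the conventional seven-segment encoding in the same segment order as the `segments` list above (top, top-left, top-right, center, bottom-left, bottom-right, bottom):

# sketch of the assumed DIGITS_LOOKUP table; each key is the on/off tuple of the 7 segments
DIGITS_LOOKUP = {
    (1, 1, 1, 0, 1, 1, 1): 0,
    (0, 0, 1, 0, 0, 1, 0): 1,
    (1, 0, 1, 1, 1, 0, 1): 2,
    (1, 0, 1, 1, 0, 1, 1): 3,
    (0, 1, 1, 1, 0, 1, 0): 4,
    (1, 1, 0, 1, 0, 1, 1): 5,
    (1, 1, 0, 1, 1, 1, 1): 6,
    (1, 0, 1, 0, 0, 1, 0): 7,
    (1, 1, 1, 1, 1, 1, 1): 8,
    (1, 1, 1, 1, 0, 1, 1): 9
}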
Ejemplo n.º 39
0
        j = 12
        m = 0
        while j > 0:
            # print(j)
            img = gray_org_img
            green = img_org.copy()

            cross_kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (j, j))
            img = cv2.dilate(img, cross_kernel, iterations = 1)

            # blur = gaussianblur(img,cross_kernel)
            blur = cv2.GaussianBlur(img, (9, 9), 0)

            th3 = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 9, 2)

            imgEdge, contours, hierarchy = cv2.findContours(th3, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

            # lsd = cv2.createLineSegmentDetector(0)

            # lines, _ = lsd.detect(th3)[:2]

            # # print(w)

            # # th3 = lsd.drawSegments(th3,lines)

            # for i in range(len(lines)):
            #     for x1,y1,x2,y2 in lines[i] :
            #         cv2.line(th3,(x1,y1),(x2,y2),(0,0,0),10)

            cv2.imwrite('mono_' + str(j) + '.jpg', th3)
            j -= 1  # assumed decrement; the `while j > 0` loop above never terminates without it
Ejemplo n.º 40
0
Dist = distort.distortion()
cam = cv2.VideoCapture(0)

while (cam.isOpened()):
    ret, img = cam.read()

    dst = Dist.Undistort(img)

    con1 = dst.copy()
    con2 = dst.copy()

    gray = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)

    ret, gray = cv2.threshold(gray, 200, 255, 0)

    a, contours1, b = cv2.findContours(gray, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)
    a, contours2, b = cv2.findContours(gray, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_TC89_L1)  # cv2.CHAIN_APPROX_TC89 does not exist; the TC89 variants are _L1 and _KCOS

    cv2.drawContours(con1, contours1, -1, (0, 0, 255), 5)
    cv2.drawContours(con2, contours2, -1, (0, 0, 255), 5)
    cv2.imshow("thresh", gray)
    cv2.imshow("SIMPLE", con1)
    cv2.imshow("TC89", con2)

    if (cv2.waitKey(1) == 27):
        break

cv2.destroyAllWindows()

cam.release()
Ejemplo n.º 41
0
engine.setProperty('voice', voices[1].id)
engine.setProperty('rate', 150)

while True:
    check, frame = video.read()
    status=0
    gray_frame=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    gray_frame=cv2.GaussianBlur(gray_frame,(25,25),0)

    if baseline_image is None:
        baseline_image=gray_frame
        continue

    delta=cv2.absdiff(baseline_image,gray_frame)
    threshold=cv2.threshold(delta, 30, 255, cv2.THRESH_BINARY)[1]
    (contours,_)=cv2.findContours(threshold,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    for contour in contours:
        if cv2.contourArea(contour) < 10000:
            continue
        status=1
        (x, y, w, h)=cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0,255,0), 1)
    status_list.append(status)

    if status_list[-1]==1 and status_list[-2]==0:
        t = threading.Thread(target=thread_voice_alert, args=(engine,))
        t.start()
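The loop above spawns a thread running a thread_voice_alert helper that is not shown in this snippet. A minimal sketch, assuming `engine` is the pyttsx3 engine configured above:

def thread_voice_alert(engine):
    # hypothetical helper: speak a fixed alert on the shared pyttsx3 engine
    engine.say("Motion detected")
    engine.runAndWait()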

Ejemplo n.º 42
0
def stable_marker_detector(gray):
    """
    detect stable marker
    """

    gray_orig = gray.copy()
    cv2.imwrite("save/gray_orig.png", gray_orig)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(blurred, 50, 200, 255)
    cv2.imwrite("save/edge.png", edged)
    gray_inv = cv2.bitwise_not(gray)
    cv2.imwrite("save/gray_inv.png", gray_inv)
    ret, gray = cv2.threshold(gray_inv, 200, 255, 0)  # extract white area
    cv2.imwrite("save/gray.png", gray)
    """
    detect stable marker
    """
    gray_m = cv2.threshold(gray_orig, 100, 255, 0)[1]  # extract white area
    cv2.imwrite("save/gray_m.png", gray_m)

    cnts = cv2.findContours(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    largest_area = sorted(cnts, key=cv2.contourArea)[-1]
    (x, y, w, h) = cv2.boundingRect(largest_area)
    m = gray_orig.copy()

    mask_gray = np.zeros(gray.shape, np.uint8)  # all black in mask
    cv2.drawContours(mask_gray, [largest_area], 0, (255, 255, 255), -1)
    cv2.imwrite("save/contour.png", mask_gray)
    mask_gray_inv = cv2.bitwise_not(mask_gray)
    cv2.imwrite("save/contour_inv.png", mask_gray_inv)
    # make outer of m to white
    m[0:y, :] = 255
    m[y + h:, :] = 255
    m[y:y + h, 0:x] = 255
    m[y:y + h, x + w:] = 255
    cv2.imwrite("save/m2.png", m)
    m = cv2.bitwise_and(m, mask_gray)  # pick roi from image
    cv2.imwrite("save/m3.png", m)
    m[0:y, :] = 0
    m[y + h:, :] = 0
    m[y:y + h, 0:x] = 0
    m[y:y + h, x + w:] = 0
    cv2.imwrite("save/m4.png", m)
    m = cv2.threshold(m, 100, 255, 0)[1]  # extract white area
    cv2.imwrite("save/m5.png", m)

    cnts = cv2.findContours(m, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    print('{} contours found'.format(len(cnts)))
    # cv2.imshow('Contours', m)
    # cv2.waitKey(0)

    is_stable = False
    if len(cnts) > 0:
        for _c, c in enumerate(cnts):
            # compute the bounding box of the contour
            (x, y, w, h) = cv2.boundingRect(c)
            # plot contours
            # if show_image:
            print("marker {}: w={}, h={}, x={}, y={}".format(_c, w, h, x, y))
            # cv2.drawContours(m, cnts, _c, (255, 255, 255), 1)
            # cv2.imshow('Contours', m)
            # cv2.waitKey(0)
            if w < 10 and h < 10 and w * h < 50:
                is_stable = True
                print("\nScale is stable!\n")

    return is_stable
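A hypothetical driver for this function, assuming frames come from a webcam and the save/ directory it writes debug images into already exists:

import cv2

cap = cv2.VideoCapture(0)
ret, frame = cap.read()
if ret:
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    print("stable:", stable_marker_detector(gray))
cap.release()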
Ejemplo n.º 43
0
                required=True,
                help="path to output directory of annotations")
args = vars(ap.parse_args())

imagePaths = list(paths.list_images(args["input"]))
counts = {}

for (i, imagePath) in enumerate(imagePaths):
    print("[INFO] processing image {}/{}".format(i + 1, len(imagePaths)))
    try:
        image = cv2.imread(imagePath)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.copyMakeBorder(gray, 8, 8, 8, 8, cv2.BORDER_REPLICATE)
        thresh = cv2.threshold(gray, 0, 255,
                               cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:4]

        for c in cnts:
            (x, y, w, h) = cv2.boundingRect(c)
            roi = gray[y - 5:y + 5 + h, x - 5:x + 5 + w]
            cv2.imshow("ROI", imutils.resize(roi, width=28))
            key = cv2.waitKey(0)

            if key == ord("'"):
                print("[INFO] ignoring character")
                continue

            key = chr(key).upper()
            dirPath = os.path.sep.join([args["annot"], key])
Ejemplo n.º 44
0
cap = cv.VideoCapture(0)

font = cv.FONT_HERSHEY_COMPLEX

while True:
    # cap.read() reads a frame and stores it in the variable frame
    _, frame = cap.read()
    cv.imshow("original", frame)
    # convert BGR to grayscale: color adds nothing to shape detection, and a single channel is cheaper to process
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    cv.imshow("grayscale", gray)
    # detect all the edges in the frame (https://towardsdatascience.com/canny-edge-detection-step-by-step-in-python-computer-vision-b49c3a2d8123)
    edge = cv.Canny(gray, 100, 200)
    # find the boundaries of closed objects (contours); findContours expects a binary image, and the Canny edge map is already binary, so no conversion is needed
    contour, heire = cv.findContours(
        edge, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    for cnt in contour:
        # calculates the area of contours
        area = cv.contourArea(cnt)
        # this avoids detecting very small objects in the frame
        if area > 200:
            # this outlines the contours in the frame with green color
            cv.drawContours(frame, [cnt], 0, (0, 255, 0), 2)
            # approximate the contour with a polygon that has fewer vertices
            approx = cv.approxPolyDP(cnt, 0.02*cv.arcLength(cnt, True), True)
            x = approx.ravel()[0]
            y = approx.ravel()[1]
            if len(approx) == 3:
                cv.putText(frame, "Triangle", (x, y), font, 1, (255, 0, 0))
            elif len(approx) == 4:
                cv.putText(frame, "rectangle", (x, y), font, 1, (255, 0, 0))
Ejemplo n.º 45
0
def make_segments(count, thresh, blur, image, path, newpath,
                  actual_contours_path, segment_path):
    # print image.shape
    # fo = open("processing.txt", "a")
    # print path
    coordinates_of_segments = {}
    temp_coordinates = []
    ret, th = cv2.threshold(blur, thresh, 255, cv2.THRESH_BINARY)

    # individual = 'croppedSegments/'
    xpath = newpath
    imsave(newpath + '/thresholded_image.jpg', th)
    filename = list(path.split('/'))
    filename = filename[-1]
    filename = list(filename.split('\\'))
    filename = filename[-1]
    # print filename
    imsave(thresholded_path + filename, th)

    cnts, hierarchy = cv2.findContours(th, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)  # 1 and 2 are the integer values of these flags

    thresholded_image = cv2.cv.LoadImage(newpath + '/thresholded_image.jpg')
    pathses = newpath + "/size.txt"
    # delete_file(newpath + '/thresholded_image.jpg')
    actual_image = cv2.imread(path)
    file_name = []
    file_name = path.split("\\")
    file = file_name[-1]
    coordinates_of_segments[file] = []
    # print file,thresh
    loaded_image = cv2.cv.LoadImage(path)
    rect_image = actual_image
    contour_list = []
    mask = np.ones(image.shape[:2], dtype="uint8") * 255

    # loop over the contours
    number = 0
    red_number = 0
    green_number = 0
    global mc_and_dc_list
    segment_list = []
    original_segment_list = []
    coordinates_list = []
    xmin = 10000
    ymin = 10000
    xmax = 0
    ymax = 0
    w1 = 0
    h1 = 0
    counters = 0
    for c in cnts:

        approx = cv2.approxPolyDP(c, 0.009 * cv2.arcLength(c, True), True)
        area = cv2.contourArea(c)

        if len(approx) > 8 and 100 < area < 4000:

            number += 1
            global w, h
            center, angle, w, h, x1, y1, x2, y2 = get_coordinates(
                actual_image, c, coordinates_list)
            if (x1 - w < xmin):
                xmin = x1 - w
            if (x2 + w > xmax):
                xmax = x2 + w
            if (y1 - h < ymin):
                ymin = y1 - h
            if (y2 + h > ymax):
                ymax = y2 + h
            w1 += w
            h1 += h
            counters = counters + 1
            #            print(x1-w,y1-h,x2+w,y2+h,counters,h1,h)
            crop_th = crop_image(thresholded_image, center, angle, w, h)
            crop = crop_image(loaded_image, center, angle, w, h)
            image = crop
            # create_dir(individual+file+'/')
            cv2.cv.SaveImage(newpath + '/' + 'contour_' + str(number) + '.jpg',
                             crop_th)
            cv2.cv.SaveImage(
                actual_contours_path + 'contour_' + str(number) + '.jpg', crop)
            # cv2.cv.SaveImage(individual+ file+'/' + str(random.randint(1,50000))  + '.jpg',crop)
            temp_image = PIL.Image.open(newpath + '/' + 'contour_' +
                                        str(number) + '.jpg')
            original_temp_image = PIL.Image.open(actual_contours_path +
                                                 'contour_' + str(number) +
                                                 '.jpg')
            segment_list.append(temp_image)
            original_segment_list.append(original_temp_image)

            # image = original_temp_image
            image = skimage.color.rgb2gray(
                skimage.io.imread(actual_contours_path + 'contour_' +
                                  str(number) + '.jpg'))
            delete_file(newpath + '/' + 'contour_' + str(number) + '.jpg')
            delete_file(actual_contours_path + 'contour_' + str(number) +
                        '.jpg')
            total = []
            h = image.shape[0]
            w = image.shape[1]

            for x in xrange(h):
                s = 0
                for y in xrange(w):
                    s += image[x][y]
                total.append(s)

            avg = [sum(total) / len(total)] * len(total)
            T = list(range(len(total)))
            t = np.array(T)
            power = np.array(total)
            totalnew = np.linspace(t.min(), t.max(), len(total))
            power_smooth = spline(t, power, totalnew)

            # ax = axs[1]
            sigma = 3
            x_g1d = gaussian_filter1d(totalnew, sigma)
            y_g1d = gaussian_filter1d(power_smooth, sigma)

            index = []
            temp = 0
            for i in xrange(1, len(y_g1d) - 1):
                if y_g1d[i] > y_g1d[i - 1] and y_g1d[i] > y_g1d[i + 1]:
                    index.append(i)

            if len(index) == 0:
                x_g1d = totalnew
                y_g1d = power_smooth
                for i in xrange(1, len(y_g1d) - 1):
                    if y_g1d[i] > y_g1d[i - 1] and y_g1d[i] > y_g1d[i + 1]:
                        index.append(i)

            cm = []

            for x in xrange(1, len(index)):
                for y in xrange(x):
                    if y_g1d[index[y]] < y_g1d[index[x]]:
                        temp = index[y]
                        index[y] = index[x]
                        index[x] = temp

            if len(index) > 0:
                mx = [y_g1d[index[0]]] * len(total)
                # plt.plot(t,mx)
                cent1 = index[0]
                # ax=axs[0]
                cm1 = (w / 2, cent1)
                cv2.circle(image, cm1, 3, (0, 1, 0), -1)
                cm.append(cm1)
            DCcount = 0

            if len(index) > 1 and y_g1d[index[1]] > avg[0] and abs(
                    y_g1d[cent1] - y_g1d[index[1]]) < abs(y_g1d[index[1]] -
                                                          y_g1d[int(avg[0])]):
                # if len(index)>1 and total[index[1]]>avg[0] and abs(cent1-index[1])>h/a and abs(total[cent1]-total[index[1]])<abs(total[index[1]]-total[int(avg[0])]):
                mx2 = [y_g1d[index[1]]] * len(total)
                # plt.plot(t,mx2)
                cent2 = index[1]
                cm2 = (w / 2, cent2)
                cv2.circle(image, cm2, 3, (0, 1, 0), -1)
                cm.append(cm2)
                DCcount += 1

            if len(cm) == 2:
                red_number += 1
                rect = cv2.minAreaRect(c)
                box = cv2.cv.BoxPoints(rect)
                temp_box = list(box)
                temp_box.append((1, 1))
                box = np.int0(box)

                temp_coordinates.append(temp_box)
                # print "coordinates of dc"
                # print box
                cv2.drawContours(actual_image, [box], 0, (0, 0, 255), 2)
            else:
                green_number += 1
                rect = cv2.minAreaRect(c)
                box = cv2.cv.BoxPoints(rect)
                temp_box = list(box)
                temp_box.append((0, 0))
                box = np.int0(box)

                temp_coordinates.append(temp_box)
                # print "coordinates of mc"
                # print box
                cv2.drawContours(actual_image, [box], 0, (0, 255, 0), 2)
    f = open(pathses, 'w')
    ##print(w1,h1,counters,w1/(counters),h1/(counters))
    #print(pathses)
    #print("dsfj sdjfhjdsfdsfjdsjf jdsfjds jfdsjjf sjfdjsfjdsh")
    f.write('{}  {}  {}  {}  {}  {}'.format(xmin, ymin, xmax, ymax,
                                            w1 / (counters), h1 / (counters)))
    f.close()
    coordinates_of_segments[file] = temp_coordinates
    cv2.imwrite(segment_path + filename, actual_image)
    time.sleep(0.30)
    #cv2.waitKey(50)
    print "^^^^^^^^^^^^^^", time.time()
    i = count + 1
    # while soft_sheet['A'+str(i)]==None:
    soft_sheet['A' + str(i)] = file
    soft_sheet['B' + str(i)] = number
    if DCcount != 0:
        soft_sheet['C' + str(i)] = DCcount
    pathses, tail = os.path.split(pathses)
    pathses, tail = os.path.split(pathses)
    soft_data.save('data.xlsx')
    mc_and_dc_list.append([red_number, green_number, number, tail[8:]])
    return mc_and_dc_list, number, segment_list, original_segment_list, coordinates_list, coordinates_of_segments
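This example targets Python 2 and the long-removed cv2.cv compatibility layer (xrange, print statements, cv2.cv.LoadImage). For reference, a sketch of the modern OpenCV 3+/Python 3 equivalents of the legacy calls it uses, with `c` and `path` as in the loop above:

# modern equivalents of the legacy calls above (a sketch, not a drop-in port)
rect = cv2.minAreaRect(c)
box = np.int0(cv2.boxPoints(rect))   # replaces cv2.cv.BoxPoints(rect)
img = cv2.imread(path)               # replaces cv2.cv.LoadImage(path)
cv2.imwrite('contour.jpg', img)      # replaces cv2.cv.SaveImage('contour.jpg', img)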
Ejemplo n.º 46
0
    def capture_thread(self, IPinver):
        global frame_image, camera
        ap = argparse.ArgumentParser()  # OpenCV initialization
        ap.add_argument("-b",
                        "--buffer",
                        type=int,
                        default=64,
                        help="max buffer size")
        args = vars(ap.parse_args())
        pts = deque(maxlen=args["buffer"])

        font = cv2.FONT_HERSHEY_SIMPLEX

        camera = picamera.PiCamera()
        camera.resolution = (640, 480)
        camera.framerate = 20
        rawCapture = PiRGBArray(camera, size=(640, 480))

        context = zmq.Context()
        footage_socket = context.socket(zmq.PUB)
        print(IPinver)
        footage_socket.connect("tcp://%s:5555" % IPinver)

        avg = None
        motionCounter = 0
        lastMotionCaptured = datetime.datetime.now()

        for frame in camera.capture_continuous(rawCapture,
                                               format="bgr",
                                               use_video_port=True):
            frame_image = frame.array
            timestamp = datetime.datetime.now()

            if FindColorMode:
                ####>>>OpenCV Start<<<####
                hsv = cv2.cvtColor(frame_image, cv2.COLOR_BGR2HSV)
                mask = cv2.inRange(hsv, colorLower, colorUpper)  # 1
                mask = cv2.erode(mask, None, iterations=2)
                mask = cv2.dilate(mask, None, iterations=2)
                cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)[-2]
                center = None
                if len(cnts) > 0:
                    cv2.putText(
                        frame_image,
                        "Target Detected",
                        (40, 60),
                        font,
                        0.5,
                        (255, 255, 255),
                        1,
                        cv2.LINE_AA,
                    )
                    c = max(cnts, key=cv2.contourArea)
                    ((x, y), radius) = cv2.minEnclosingCircle(c)
                    M = cv2.moments(c)
                    center = (int(M["m10"] / M["m00"]),
                              int(M["m01"] / M["m00"]))
                    X = int(x)
                    Y = int(y)
                    if radius > 10:
                        cv2.rectangle(
                            frame_image,
                            (int(x - radius), int(y + radius)),
                            (int(x + radius), int(y - radius)),
                            (255, 255, 255),
                            1,
                        )

                    if Y < (240 - tor):
                        error = (240 - Y) / 5
                        outv = int(round((pid.GenOut(error)), 0))
                        servo.up(outv)
                        Y_lock = 0
                    elif Y > (240 + tor):
                        error = (Y - 240) / 5
                        outv = int(round((pid.GenOut(error)), 0))
                        servo.down(outv)
                        Y_lock = 0
                    else:
                        Y_lock = 1

                    if X < (320 - tor * 3):
                        error = (320 - X) / 5
                        outv = int(round((pid.GenOut(error)), 0))
                        servo.lookleft(outv)
                        servo.turnLeft(coe_Genout(error, 64))
                        X_lock = 0
                    elif X > (330 + tor * 3):
                        error = (X - 320) / 5
                        outv = int(round((pid.GenOut(error)), 0))
                        servo.lookright(outv)
                        servo.turnRight(coe_Genout(error, 64))
                        X_lock = 0
                    else:
                        move.motorStop()
                        X_lock = 1

                    if X_lock == 1 and Y_lock == 1:
                        switch.switch(1, 1)
                        switch.switch(2, 1)
                        switch.switch(3, 1)
                        moveCtrl(ultra.checkdist(), back_R, forward_R)
                    else:
                        move.motorStop()
                        switch.switch(1, 0)
                        switch.switch(2, 0)
                        switch.switch(3, 0)

                else:
                    cv2.putText(
                        frame_image,
                        "Target Detecting",
                        (40, 60),
                        font,
                        0.5,
                        (255, 255, 255),
                        1,
                        cv2.LINE_AA,
                    )
                    move.motorStop()
                ####>>>OpenCV Ends<<<####

            if WatchDogMode:
                gray = cv2.cvtColor(frame_image, cv2.COLOR_BGR2GRAY)
                gray = cv2.GaussianBlur(gray, (21, 21), 0)

                if avg is None:
                    print("[INFO] starting background model...")
                    avg = gray.copy().astype("float")
                    rawCapture.truncate(0)
                    continue

                cv2.accumulateWeighted(gray, avg, 0.5)
                frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

                # threshold the delta image, dilate the thresholded image to fill
                # in holes, then find contours on thresholded image
                thresh = cv2.threshold(frameDelta, 5, 255,
                                       cv2.THRESH_BINARY)[1]
                thresh = cv2.dilate(thresh, None, iterations=2)
                cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)
                cnts = imutils.grab_contours(cnts)
                # print('x')
                # loop over the contours
                for c in cnts:
                    # if the contour is too small, ignore it
                    if cv2.contourArea(c) < 5000:
                        continue

                    # compute the bounding box for the contour, draw it on the frame,
                    # and update the text
                    (x, y, w, h) = cv2.boundingRect(c)
                    cv2.rectangle(frame_image, (x, y), (x + w, y + h),
                                  (128, 255, 0), 1)
                    text = "Occupied"
                    motionCounter += 1
                    # print(motionCounter)
                    # print(text)
                    LED.colorWipe(255, 16, 0)
                    lastMotionCaptured = timestamp
                    switch.switch(1, 1)
                    switch.switch(2, 1)
                    switch.switch(3, 1)

                if (timestamp - lastMotionCaptured).seconds >= 0.5:
                    LED.colorWipe(255, 255, 0)
                    switch.switch(1, 0)
                    switch.switch(2, 0)
                    switch.switch(3, 0)

            if FindLineMode:
                cvFindLine()

            cv2.line(frame_image, (300, 240), (340, 240), (128, 255, 128), 1)
            cv2.line(frame_image, (320, 220), (320, 260), (128, 255, 128), 1)

            if FindLineMode and not frameRender:
                encoded, buffer = cv2.imencode(".jpg", frame_findline)
            else:
                encoded, buffer = cv2.imencode(".jpg", frame_image)

            jpg_as_text = base64.b64encode(buffer)
            footage_socket.send(jpg_as_text)

            rawCapture.truncate(0)
Ejemplo n.º 47
0
import cv2
import numpy as np

# Finding Contour and Moments
img = cv2.imread('testContours.png')
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 127, 255, 0)
_, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#for c in contours:
#    im = cv2.drawContours(img, [c], 0, (0, 255, 0), 3)


M = []
C = []
for index, c in enumerate(contours):
    # moments of the contour
    M.append(cv2.moments(c))

    # centroid of the contour
    C.append([int(M[index]['m10']/M[index]['m00']), int(M[index]['m01']/M[index]['m00'])])
print(C)
#img = cv2.circle(img, tuple(C[index]), 2, (255, 0, 0), 2)
#cv2.imshow('Original', img)
#cv2.waitKey(0)
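The centroid comes from the spatial moments: cx = m10/m00 and cy = m01/m00, where m00 is the contour area. A degenerate (zero-area) contour has m00 == 0, so a guarded version of the loop body looks like:

# guard against zero-area contours before dividing by m00
M = cv2.moments(c)
if M['m00'] != 0:
    cx = int(M['m10'] / M['m00'])
    cy = int(M['m01'] / M['m00'])
    C.append([cx, cy])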
Ejemplo n.º 48
0
 def _find_exterior_contours(cls, img):
     ret = cv.findContours(img, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
     if len(ret) == 2:
         return ret[0]
     elif len(ret) == 3:
         return ret[1]
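This shim papers over an API change: cv.findContours returns (contours, hierarchy) in OpenCV 2 and 4 but (image, contours, hierarchy) in OpenCV 3. Since the contour list is the second-to-last element in every case, an equivalent one-liner is:

# version-agnostic: the contour list is always the second-to-last returned element
contours = cv.findContours(img, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)[-2]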
Ejemplo n.º 49
0
import time

robot = jetbot.JetRobot()
cap = cv2.VideoCapture(-1)

while True:
    _, img = cap.read()
    x_center, y_center = TamAnh.ToaDoTam(img)
    ###############  Red color
    low_red = np.array([161, 155, 84])
    high_red = np.array([179, 255, 255])
    hsv_frame = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    red_mask = cv2.inRange(hsv_frame, low_red, high_red)
    #cv2.imshow("Mask", red_mask)
    ###############
    contours, _ = cv2.findContours(red_mask, cv2.RETR_TREE,
                                   cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True)
    for cnt in contours:
        (x, y, w, h) = cv2.boundingRect(cnt)
        x_medium = int(x + w / 2)
        y_medium = int(y + h / 2)
        #########
        cv2.circle(img, (x_medium, y_medium), 20, (0, 255, 0))
        cv2.line(img, (x_center, y_center), (x_medium, y_medium),
                 (0, 255, 255), 2)
        ######### self-driving control
        vitri = x_center - x_medium
        sms = ""
        if vitri > 20:
            sms = "Left: " + str(vitri)
            robot.left_motor.value = 0
Ejemplo n.º 50
0
def solve_captcha():
    with open(MODEL_LABELS_FILENAME, "rb") as f:
        lb = pickle.load(f)

    # loop over the image paths
    # for image_file in captcha_image_files:
        # Load the image and convert it to grayscale
    found = False
    captcha_text = ''
    while found == False:
        # image_file = crawl_img(request.args.get('Id'))
        try:
            name = request.form.get('base64Str')
            # print(name)
        except:
            print('could not parse the base64 string')
        image_file = saveBase64ToFile(request.form.get('base64Str'))

        de = 0
        image = cv2.imread(image_file)
        try:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        except:
            return json.dumps({
                "status": False,
                "error": "Convert to grey color fail"
            })
        # Add some extra padding around the image
        image = cv2.copyMakeBorder(image, 20, 20, 20, 20, cv2.BORDER_REPLICATE)
        # threshold the image (convert it to pure black and white)
        thresh = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        # find the contours (continuous blobs of pixels) in the image
        contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        # Hack for compatibility with different OpenCV versions
        contours = contours[0] if imutils.is_cv2() else contours[1]

        letter_image_regions = []
        # inside of each one
        for contour in contours:
            # Get the rectangle that contains the contour
            (x, y, w, h) = cv2.boundingRect(contour)
            #
            # # Compare the width and height of the contour to detect letters that
            # # are conjoined into one chunk
            # if w / h > 1.7:
            #     # This contour is too wide to be a single letter!
            #     # Split it in half into two letter regions!
            #     half_width = int(w / 2)
            #     letter_image_regions.append((x, y, half_width, h))
            #     letter_image_regions.append((x + half_width, y, half_width, h))
            # else:
            #     # This is a normal letter by itself
            #     letter_image_regions.append((x, y, w, h))
            letter_image_regions.append((x, y, w, h))

        # If we found more or less than 4 letters in the captcha, our letter extraction
        # didn't work correctly. Skip the image instead of saving bad training data!
        if len(letter_image_regions) != 4:
            de=de+1
            return json.dumps({
                "status": False,
                "error": "Letter < 4"
            })

        # Sort the detected letter images based on the x coordinate to make sure
        # we are processing them from left-to-right so we match the right image
        # with the right letter
        letter_image_regions = sorted(letter_image_regions, key=lambda x: x[0])

        # Create an output image and a list to hold our predicted letters
        output = cv2.merge([image] * 3)
        predictions = []

        # loop over the letters
        for letter_bounding_box in letter_image_regions:
            # Grab the coordinates of the letter in the image
            x, y, w, h = letter_bounding_box

            # Extract the letter from the original image with a 2-pixel margin around the edge
            letter_image = image[y - 2:y + h + 2, x - 2:x + w + 2]
            # cv2.imshow("Output", letter_image)
            # cv2.waitKey()

            letter_image_temp = letter_image


            # Re-size the letter image to 20x20 pixels to match training data
            try:
                letter_image = resize_to_fit(letter_image, 20, 20)
            except:
                return json.dumps({
                    "status": False,
                    "error": "Fit image size fail"
                })
            # cv2.imshow("Output", letter_image)
            # cv2.waitKey()
            # Turn the single image into a 4d list of images to make Keras happy
            letter_image = np.expand_dims(letter_image, axis=2)
            letter_image = np.expand_dims(letter_image, axis=0)

            # Ask the neural network to make a prediction
            global graph
            try:
                with graph.as_default():
                    prediction =  model.predict(letter_image)
                    letter =  lb.inverse_transform(prediction)[0]
                    predictions.append(letter)
            except:
                return json.dumps({
                    "status": False,
                    "error": "get prediction fail"
                })


            # randomstr = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(6))
            # cv2.imwrite("test2/" + letter + "/" + randomstr + ".png", letter_image_temp)


            # draw the prediction on the output image
            # cv2.rectangle(output, (x - 2, y - 2), (x + w + 4, y + h + 4), (0, 255, 0), 1)
            cv2.putText(output, letter, (x - 5, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 0, 255), 2)

        # Print the captcha's text
        captcha_text = "".join(predictions)
        print(image_file)
        # Show the annotated image
        # cv2.imshow("Output", output)
        # cv2.waitKey()
        found = True
        return json.dumps({
            "status": True,
            "data": captcha_text
        })
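The resize_to_fit helper called above is not defined in this snippet. A minimal sketch under the usual interpretation (resize along the larger dimension, then pad out to the exact target size):

import cv2
import imutils

def resize_to_fit(image, width, height):
    # resize along the larger dimension so the image fits inside width x height
    (h, w) = image.shape[:2]
    if w > h:
        image = imutils.resize(image, width=width)
    else:
        image = imutils.resize(image, height=height)
    # pad evenly on both sides, then force the exact target size
    padW = int((width - image.shape[1]) / 2.0)
    padH = int((height - image.shape[0]) / 2.0)
    image = cv2.copyMakeBorder(image, padH, padH, padW, padW, cv2.BORDER_REPLICATE)
    return cv2.resize(image, (width, height))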
Ejemplo n.º 51
0
def detect_ROI_yellow_orange(img_in, img, para1, para2, switch):
    yellow_low = para1
    yellow_high = para2


    img_draw = np.copy(img_in)
    img_copy = np.copy(img_in)
    img_hsv = cv2.cvtColor(img_in, cv2.COLOR_BGR2HSV)
    yellow_mask = cv2.inRange(img_hsv, np.array(yellow_low, dtype="uint8"),
                              np.array(yellow_high, dtype="uint8"))

    images_path = "output/test/warning/{}".format(img[1][0:])

    contours = cv2.findContours(yellow_mask.copy(), cv2.RETR_LIST,
                                cv2.CHAIN_APPROX_SIMPLE)[0]
    # print len(contours)

    roi_list = []
    pos_list = []

    con_id = 0
    if contours is not None:
        for c in contours:
            con_id += 1
            con_area = cv2.contourArea(c)
            if con_area > 100:

                cv2.drawContours(img_draw, [c], -1, (0, 255, 0), 1)
                x, y, w, h = cv2.boundingRect(c)
                cv2.rectangle(img_draw, (x, y), (x + w, y + h), (0, 255, 255),
                              1)
                # print w*h

                roi_yellow = yellow_mask[y:y + h, x:x + w]
                num_white_pixel = np.sum(roi_yellow) / 255
                num_total_pixel = w * h
                percentage = float(num_white_pixel) / num_total_pixel
                ratio = float(h) / w
                # print ratio

                # print "contour{}: Area:{},white_pixels{},percentage{}".format(con_id, con_area,num_white_pixel, percentage)

                if percentage > 0.1 and 0.5 < ratio < 2:
                    cv2.rectangle(img_draw, (x, y), (x + w, y + h),
                                  (0, 0, 255), 1)
                    # roi = img_copy[y: y + h, x:x + w]
                    size = max(w, h)

                    roi_BGR = img_copy[y:y + size, x:x + size]
                    roi_yellow = yellow_mask[y:y + size, x:x + size]

                    if switch == 1:
                        roi_list.append(roi_yellow)
                    if switch == 2:
                        roi_list.append(roi_BGR)
                    pos_list.append((x, y, size, size))

                # pos_list.sort(key = lambda x:x[2]*x[3], reverse=True)

    #cv2.imwrite("output/test/warning/{}_yellow_mask.png".format(img[1][0:]), yellow_mask)
    #cv2.imwrite("output/test/warning/{}_box.png".format(img[1][0:]), img_draw)

    roi_id = 0

    #if not os.path.exists(images_path):
    # print roi_id
    #os.makedirs(images_path)
    # print "There are {} ROI".format(len(roi_list))

    for roi in roi_list:
        roi_id += 1
        # print roi.shape
        #cv2.imwrite(os.path.join(images_path, "yellow_roi_{}.png".format(roi_id)), roi)

    return roi_list, pos_list
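A hypothetical call; the HSV bounds and the (image_data, filename) pair passed as `img` are assumptions based on how the body indexes img[1]:

frame = cv2.imread("sign.png")
rois, positions = detect_ROI_yellow_orange(frame, (frame, "sign.png"),
                                           (20, 100, 100), (35, 255, 255), switch=1)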
Ejemplo n.º 52
0
def mask_change(source_img_path, add_img_path, save_path, rgb):

    source_image = cv2.imread(source_img_path)
    add_image = cv2.imread(add_img_path)

    # widen the boundary value by 10 units in each direction
    color_lower = [i-10 for i in rgb]
    print(color_lower)
    color_upper = [i+10 for i in rgb]
    print(color_upper)

    lower_blue = np.array(color_lower)
    upper_blue = np.array(color_upper)

    # paint the selected region of the add image white; everywhere else stays black
    add_mask_tem = cv2.inRange(add_image, lower_blue, upper_blue)

    # find the contours
    add_img_area, add_contours_area, add_hierarchy_area = cv2.findContours(add_mask_tem, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # compute the area
    add_allarea = cv2.contourArea(add_contours_area[0])

    # paint the black parts of the source image white and everything else black; pixels of the source in the (0,0,0)-(30,30,30) range count as black
    source_mask_tem = cv2.inRange(source_image, np.array([0, 0, 0]), np.array([30, 30, 30]))

    source_mask = 255 - source_mask_tem

    ##########
    # cv2.waitKey(0)
    # kernel for the dilation operation
    # kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
    # dilation
    # mask_binary_dilation = cv2.dilate(mask, kernel)
    #
    # add_image_dilation = cv2.dilate(add_image, kernel)
    #
    # img_mask_rgb = cv2.bitwise_and(add_image_dilation, add_image_dilation, mask=mask_binary_dilation)
    ##########

    # intersect the binarized add image with the source image (the added region may not exceed the already-colored region of the source); the part actually needed becomes white, everything else black
    add_mask = cv2.bitwise_and(source_mask_tem, add_mask_tem)

    # get the excess part, i.e. the region that was colored out of bounds
    add_mask_xor = cv2.bitwise_xor(add_mask, add_mask_tem)

    xor_img, xor_contours, xor_hierarchy = cv2.findContours(add_mask_xor, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

    xor_area = 0
    if len(xor_contours) == 0:
        xor_area = 0
    else:
        for i in range(len(xor_contours)):
            xor_area += cv2.contourArea(xor_contours[i])

    # ratio of the out-of-bounds area to the total newly added area
    if xor_area / add_allarea > 0.01:
        messagebox.showinfo("Error", add_img_path + "\n  The newly added area covers a large part of the already-colored region; please check your coloring result")
        return

    # extract the genuinely valid colored region from the add image
    add_mask_effective = cv2.bitwise_and(add_image, add_image, mask=add_mask)

    source_image_mask = cv2.bitwise_and(source_image, source_image,mask=source_mask)

    # overlay the valid colored region onto the source file
    final_image = add_mask_effective + source_image_mask

    cv2.imwrite(save_path, final_image)
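A hypothetical invocation, assuming the new strokes in add.png were drawn in pure blue. Note that cv2.imread loads images in BGR order, so despite its name the rgb parameter is effectively interpreted as a BGR triple:

mask_change("source.png", "add.png", "merged.png", [255, 0, 0])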
Ejemplo n.º 53
0
def detect_ROI_red(img_in, img):

    red_low_1 = (0, 50, 50)
    red_low_2 = (5, 255, 255)
    red_high_1 = (175, 50, 50)
    red_high_2 = (180, 255, 255)

    img_draw = np.copy(img_in)
    img_copy = np.copy(img_in)
    img_hsv = cv2.cvtColor(img_in, cv2.COLOR_BGR2HSV)
    red_mask_low = cv2.inRange(img_hsv, np.array(red_low_1, dtype="uint8"),
                               np.array(red_low_2, dtype="uint8"))
    red_mask_high = cv2.inRange(img_hsv, np.array(red_high_1, dtype="uint8"),
                                np.array(red_high_2, dtype="uint8"))
    red_mask = cv2.addWeighted(red_mask_low, 1.0, red_mask_high, 1.0, 0.0)
    result_hsv = cv2.bitwise_and(img_in, img_in, mask=red_mask)

    images_path = "output/test/stop/{}".format(img[1][0:17])

    contours = cv2.findContours(red_mask.copy(), cv2.RETR_LIST,
                                cv2.CHAIN_APPROX_SIMPLE)[0]
    #print len(contours)

    roi_list = []
    pos_list = []

    con_id = 0
    if contours is not None:
        for c in contours:
            con_id += 1
            con_area = cv2.contourArea(c)
            if con_area > 40:

                cv2.drawContours(img_draw, [c], -1, (0, 255, 0), 1)
                x, y, w, h = cv2.boundingRect(c)
                cv2.rectangle(img_draw, (x, y), (x + w, y + h), (0, 255, 255),
                              1)

                roi_red = red_mask[y:y + h, x:x + w]
                num_white_pixel = np.sum(roi_red) / 255
                num_total_pixel = w * h
                percentage = float(num_white_pixel) / num_total_pixel
                ratio = float(h) / w
                #print ratio

                #print "contour{}: Area:{},white_pixels{},percentage{}".format(con_id, con_area,num_white_pixel, percentage)

                if percentage > 0.1 and 0.33 < ratio < 3:
                    cv2.rectangle(img_draw, (x, y), (x + w, y + h),
                                  (0, 0, 255), 1)
                    #roi = img_copy[y: y + h, x:x + w]
                    size = max(w, h)
                    roi_red_square = red_mask[y:y + size, x:x + size]
                    roi_list.append(roi_red_square)
                    pos_list.append((x, y, size, size))

                #pos_list.sort(key = lambda x:x[2]*x[3], reverse=True)

    #cv2.imwrite("output/test/stop/{}_red_mask.png".format(img[1][0:18]), red_mask)
    #cv2.imwrite("output/test/stop/{}_hsv.png".format(img[1][0:18]), result_hsv)
    #cv2.imwrite("output/test/stop/{}_box.png".format(img[1][0:18]), img_draw)

    roi_id = 0

    #if not os.path.exists(images_path):
    #print roi_id
    #os.makedirs(images_path)
    #print "There are {} ROI".format(len(roi_list))

    for roi in roi_list:
        roi_id += 1
        #print roi.shape
        #cv2.imwrite(os.path.join(images_path, "red_roi_{}.png".format(roi_id)), roi)

    return (roi_list, pos_list)
Ejemplo n.º 54
0
def process_image(args):
    
    import os
    from scipy.ndimage.filters import rank_filter
    import numpy as np
    from PIL import Image, ImageEnhance, ImageFilter, ImageDraw
    import matplotlib.pyplot as plt
    import cv2
    
    path = args.input
    out_path = args.output
    
    def deskew(im, save_directory, direct, max_skew=10):
        if direct == "Y":
            height, width = im.shape[:2]
            print(height)
            print(width)

            # Create a grayscale image and denoise it
            if channels != 0:
                im_gs = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
                im_gs = cv2.fastNlMeansDenoising(im_gs, h=3)
            else:
                im_gs = cv2.fastNlMeansDenoising(im, h=3)

            # print("De-noise ok.")
            # Create an inverted B&W copy using Otsu (automatic) thresholding
            im_bw = cv2.threshold(im_gs, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
            # print("Otsu ok.")

            # Detect lines in this image. Parameters here mostly arrived at by trial and error.
            # If the initial threshold is too high, then settle for a lower threshold value
            try:
                lines = cv2.HoughLinesP(im_bw, 1, np.pi / 180, 200, minLineLength=width / 12, maxLineGap=width / 150)
                # Collect the angles of these lines (in radians)
                angles = []
                for line in lines:
                    x1, y1, x2, y2 = line[0]
                    geom = np.arctan2(y2 - y1, x2 - x1)
                    print(np.rad2deg(geom))
                    angles.append(geom)
            except:
                lines = cv2.HoughLinesP(im_bw, 1, np.pi / 180, 150, minLineLength=width / 12, maxLineGap=width / 150)
                # Collect the angles of these lines (in radians)
                angles = []
                for line in lines:
                    x1, y1, x2, y2 = line[0]
                    geom = np.arctan2(y2 - y1, x2 - x1)
                    print(np.rad2deg(geom))
                    angles.append(geom)

            angles = [angle for angle in angles if abs(angle) < np.deg2rad(max_skew)]

            if len(angles) < 5:
                # Insufficient data to deskew
                print("Insufficient data to deskew. Cropped image might already be straight. Cropped image saved.")
                cv2.imwrite(img=im,
                            filename=save_directory + cropped_jpeg_list[pg_count])
                #im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
                #im_pil = Image.fromarray(im)
                #im_pil.save(save_directory + cropped_jpeg_list[pg_count])
                print("Cropped image saved.")
                return im

            else:
                # Average the angles to a degree offset
                angle_deg = np.rad2deg(np.median(angles))

                # Rotate the image by the residual offset
                M = cv2.getRotationMatrix2D((width / 2, height / 2), angle_deg, 1)
                im = cv2.warpAffine(im, M, (width, height), borderMode=cv2.BORDER_REPLICATE)

                # Plot if a full run
                # Always save deskewed image
                if args.type == "full":
                    plt.subplot(111),plt.imshow(im)
                    plt.title('Deskewed Image'), plt.xticks([]), plt.yticks([])
                    plt.show()
                cropped_jpeg = cropped_jpeg_list[pg_count]
                cv2.imwrite(img = im,
                            filename = save_directory + cropped_jpeg[:-5] + "_rotated.jpeg")
                print("Only de-skewed cropped image saved.")
                return im
        else:
            height, width = im.shape[:2]
            print(height)
            print(width)

            # Create a grayscale image and denoise it
            im_gs = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
            im_gs = cv2.fastNlMeansDenoising(im_gs, h=3)

            # Create an inverted B&W copy using Otsu (automatic) thresholding
            im_bw = cv2.threshold(im_gs, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]

            # Detect lines in this image. Parameters here mostly arrived at by trial and error.
            # If the initial threshold is too high, then settle for a lower threshold value
            try:
                lines = cv2.HoughLinesP(im_bw, 1, np.pi / 180, 200, minLineLength=width / 12, maxLineGap=width / 150)
                # Collect the angles of these lines (in radians)
                angles = []
                for line in lines:
                    x1, y1, x2, y2 = line[0]
                    geom = np.arctan2(y2 - y1, x2 - x1)
                    print(np.rad2deg(geom))
                    angles.append(geom)
            except TypeError:
                lines = cv2.HoughLinesP(im_bw, 1, np.pi / 180, 150, minLineLength=width / 12, maxLineGap=width / 150)
                # Collect the angles of these lines (in radians)
                angles = []
                for line in lines:
                    x1, y1, x2, y2 = line[0]
                    geom = np.arctan2(y2 - y1, x2 - x1)
                    print(np.rad2deg(geom))
                    angles.append(geom)
            except:
                print ("TypeError encountered with HoughLines. Check cropped image output. Only cropped image saved.")
                return

            angles = [angle for angle in angles if abs(angle) < np.deg2rad(max_skew)]

            if len(angles) < 5:
                # Insufficient data to deskew
                print("Insufficient data to deskew. Cropped image might already be straight.")
                return im

            else:

                # Average the angles to a degree offset
                angle_deg = np.rad2deg(np.median(angles))

                # Rotate the image by the residual offset
                M = cv2.getRotationMatrix2D((width / 2, height / 2), angle_deg, 1)
                im = cv2.warpAffine(im, M, (width, height), borderMode=cv2.BORDER_REPLICATE)

                # Plot if a full run
                # Always save deskewed image
                if args.type == "full":
                    plt.subplot(111), plt.imshow(im)
                    plt.title('Deskewed Image'), plt.xticks([]), plt.yticks([])
                    plt.show()
                cropped_jpeg = cropped_jpeg_list[pg_count]
                cv2.imwrite(img=im,
                            filename=save_directory + cropped_jpeg[:-5] + "_rotated.jpeg")
                print("Rotated cropped image saved")
                return im

    def dilate(ary, N, iterations): 
        """Dilate using an NxN '+' sign shape. ary is np.uint8."""
        kernel = np.zeros((N,N), dtype=np.uint8)
        kernel[(N-1)//2,:] = 1
        dilated_image = cv2.dilate(ary / 255, kernel, iterations=iterations)

        kernel = np.zeros((N,N), dtype=np.uint8)
        kernel[:,(N-1)//2] = 1
        dilated_image = cv2.dilate(dilated_image, kernel, iterations=iterations)

        if args.type == "full":
            plt.subplot(111),plt.imshow(dilated_image,cmap = 'gray')
            plt.title('Dilated Image'), plt.xticks([]), plt.yticks([])
            plt.show()

        return dilated_image

    def find_components(edges, max_components=16):
        """Dilate the image until there are just a few connected components.
        Returns contours for these components."""
        # Perform increasingly aggressive dilation until there are just a few
        # connected components.
        count = 410
        dilation = 5
        n = 1
        while count > 400:
            n += 1
            dilated_image = dilate(edges, N=3, iterations=n)
    #         print(dilated_image.dtype)
            dilated_image = cv2.convertScaleAbs(dilated_image)
    #         print(dilated_image.dtype)
            contours, hierarchy = cv2.findContours(dilated_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            count = len(contours)
            print(count)
        #print dilation
        #Image.fromarray(edges).show()
        #Image.fromarray(255 * dilated_image).show()
        return contours

    def props_for_contours(contours, ary):
        """Calculate bounding box & the number of set pixels for each contour."""
        c_info = []
        for c in contours:
            x,y,w,h = cv2.boundingRect(c)
            c_im = np.zeros(ary.shape)
            cv2.drawContours(c_im, [c], 0, 255, -1)
            c_info.append({
                'x1': x,
                'y1': y,
                'x2': x + w - 1,
                'y2': y + h - 1,
                'sum': np.sum(ary * (c_im > 0))/255
            })
        return c_info

    def union_crops(crop1, crop2):
        """Union two (x1, y1, x2, y2) rects."""
        x11, y11, x21, y21 = crop1
        x12, y12, x22, y22 = crop2
        return min(x11, x12), min(y11, y12), max(x21, x22), max(y21, y22)


    def intersect_crops(crop1, crop2):
        x11, y11, x21, y21 = crop1
        x12, y12, x22, y22 = crop2
        return max(x11, x12), max(y11, y12), min(x21, x22), min(y21, y22)


    def crop_area(crop):
        x1, y1, x2, y2 = crop
        return max(0, x2 - x1) * max(0, y2 - y1)


    def find_border_components(contours, ary):
        borders = []
        area = ary.shape[0] * ary.shape[1]
        for i, c in enumerate(contours):
            x,y,w,h = cv2.boundingRect(c)
            if w * h > 0.5 * area:
                borders.append((i, x, y, x + w - 1, y + h - 1))
        return borders


    def angle_from_right(deg):
        return min(deg % 90, 90 - (deg % 90))


    def remove_border(contour, ary):
        """Remove everything outside a border contour."""
        # Use a rotated rectangle (should be a good approximation of a border).
        # If it's far from a right angle, it's probably two sides of a border and
        # we should use the bounding box instead.
        c_im = np.zeros(ary.shape)
        r = cv2.minAreaRect(contour)
        degs = r[2]
        if angle_from_right(degs) <= 10.0:
            box = cv2.cv.BoxPoints(r)
            box = np.int0(box)
            cv2.drawContours(c_im, [box], 0, 255, -1)
            cv2.drawContours(c_im, [box], 0, 0, 4)
        else:
            x1, y1, x2, y2 = cv2.boundingRect(contour)
            cv2.rectangle(c_im, (x1, y1), (x2, y2), 255, -1)
            cv2.rectangle(c_im, (x1, y1), (x2, y2), 0, 4)

        return np.minimum(c_im, ary)

    def find_optimal_components_subset(contours, edges):
        """Find a crop which strikes a good balance of coverage/compactness.
        Returns an (x1, y1, x2, y2) tuple.
        """
        c_info = props_for_contours(contours, edges)
        c_info.sort(key=lambda x: -x['sum'])
        total = np.sum(edges) / 255
        area = edges.shape[0] * edges.shape[1]

        c = c_info[0]
        del c_info[0]
        this_crop = c['x1'], c['y1'], c['x2'], c['y2']
        crop = this_crop
        covered_sum = c['sum']

        while covered_sum < total:
            changed = False
            recall = 1.0 * covered_sum / total
            prec = 1 - 1.0 * crop_area(crop) / area
            f1 = 2 * (prec * recall / (prec + recall))
            #print '----'
            for i, c in enumerate(c_info):
                this_crop = c['x1'], c['y1'], c['x2'], c['y2']
                new_crop = union_crops(crop, this_crop)
                new_sum = covered_sum + c['sum']
                new_recall = 1.0 * new_sum / total
                new_prec = 1 - 1.0 * crop_area(new_crop) / area
                new_f1 = 2 * new_prec * new_recall / (new_prec + new_recall)

                # Add this crop if it improves f1 score,
                # _or_ it adds 25% of the remaining pixels for <15% crop expansion.
                # ^^^ very ad-hoc! make this smoother
                remaining_frac = c['sum'] / (total - covered_sum)
                new_area_frac = 1.0 * crop_area(new_crop) / crop_area(crop) - 1
                if new_f1 > f1 or (remaining_frac > 0.25 and new_area_frac < 0.15):
                    print ('%d %s -> %s / %s (%s), %s -> %s / %s (%s), %s -> %s' % (
                            i, covered_sum, new_sum, total, remaining_frac,
                            crop_area(crop), crop_area(new_crop), area, new_area_frac,
                            f1, new_f1))
                    crop = new_crop
                    covered_sum = new_sum
                    del c_info[i]
                    changed = True
                    break

            if not changed:
                break

        return crop

    def pad_crop(crop, contours, edges, border_contour, pad_px=15):
        """Slightly expand the crop to get full contours.
        This will expand to include any contours it currently intersects, but will
        not expand past a border.
        """
        bx1, by1, bx2, by2 = 0, 0, edges.shape[0], edges.shape[1]
        if border_contour is not None and len(border_contour) > 0:
            c = props_for_contours([border_contour], edges)[0]
            bx1, by1, bx2, by2 = c['x1'] + 5, c['y1'] + 5, c['x2'] - 5, c['y2'] - 5

        def crop_in_border(crop):
            x1, y1, x2, y2 = crop
            x1 = max(x1 - pad_px, bx1)
            y1 = max(y1 - pad_px, by1)
            x2 = min(x2 + pad_px, bx2)
            y2 = min(y2 + pad_px, by2)
            return x1, y1, x2, y2

        crop = crop_in_border(crop)

        c_info = props_for_contours(contours, edges)
        changed = False
        for c in c_info:
            this_crop = c['x1'], c['y1'], c['x2'], c['y2']
            this_area = crop_area(this_crop)
            int_area = crop_area(intersect_crops(crop, this_crop))
            new_crop = crop_in_border(union_crops(crop, this_crop))
            if 0 < int_area < this_area and crop != new_crop:
                print ('%s -> %s' % (str(crop), str(new_crop)))
                changed = True
                crop = new_crop

        if changed:
            return pad_crop(crop, contours, edges, border_contour, pad_px)
        else:
            return crop

    def downscale_image(im, max_dim=2048):
        """Shrink im until its longest dimension is <= max_dim.
        Returns new_image, scale (where scale <= 1).
        """
        a, b = im.size
        if max(a, b) <= max_dim:
            return 1.0, im

        scale = 1.0 * max_dim / max(a, b)
        new_im = im.resize((int(a * scale), int(b * scale)), Image.ANTIALIAS)
        return scale, new_im

    # Collect the filename of every jpeg in the directory into a list,
    # then loop through each one of them
    uncropped_jpeg_list = []
    cropped_jpeg_list = []
    for file in os.listdir(path):
        uncropped_jpeg_temp = ""
        cropped_jpeg_temp = ""
        if file.endswith('.jpeg'):
            uncropped_jpeg_temp = "/" + file
            # print (uncropped_jpeg)
            cropped_jpeg_temp = uncropped_jpeg_temp[:-5] + "_cropped.jpeg"
            uncropped_jpeg_list.append(uncropped_jpeg_temp)
            cropped_jpeg_list.append(cropped_jpeg_temp)
            # print(cropped_jpeg)

    pg_count = 0
    for uncropped_jpeg in uncropped_jpeg_list:
        orig_im = Image.open(path + uncropped_jpeg)
        scale, im = downscale_image(orig_im)

        # Apply dilation and erosion to remove some noise
        kernel = np.ones((1, 1), np.uint8)
        img = cv2.dilate(np.asarray(im), kernel, iterations=1)
        img = cv2.erode(img, kernel, iterations=1)

        # Detect edge and plot
        edges = cv2.Canny(img, 100, 400)

        if args.type == "full":
            plt.subplot(111),plt.imshow(edges,cmap = 'gray')
            plt.title('Edge Image'), plt.xticks([]), plt.yticks([])

            plt.show()

        # TODO: dilate image _before_ finding a border. This is crazy sensitive!
        contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        borders = find_border_components(contours, edges)
        borders.sort(key=lambda b: (b[3] - b[1]) * (b[4] - b[2]))  # tuple unpacking in lambdas is invalid in Python 3

        border_contour = None
        if len(borders):
            border_contour = contours[borders[0][0]]
            edges = remove_border(border_contour, edges)

        edges = 255 * (edges > 0).astype(np.uint8)

        # Remove ~1px borders using a rank filter.
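        # rank_filter(-4) keeps the 4th-largest value in each window: a stray
        # border line only a pixel or two thick cannot dominate a 20 px window,
        # so the minimum below erases it while solid text regions survive.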
        maxed_rows = rank_filter(edges, -4, size=(1, 20))
        maxed_cols = rank_filter(edges, -4, size=(20, 1))
        debordered = np.minimum(np.minimum(edges, maxed_rows), maxed_cols)
        edges = debordered

        contours = find_components(edges)
        if len(contours) == 0:
    #        print('%s -> (no text!)' % path)
            return

        crop = find_optimal_components_subset(contours, edges)
        crop = pad_crop(crop, contours, edges, border_contour)

        draw = ImageDraw.Draw(im)
        c_info = props_for_contours(contours, edges)
        for c in c_info:
            this_crop = c['x1'], c['y1'], c['x2'], c['y2']
            draw.rectangle(this_crop, outline='blue')
        draw.rectangle(crop, outline='red')  # drawn while still in downscaled coordinates
    #   im.save(out_path + cropped_jpeg_list[pg_count])
        draw.text((50, 50), path, fill='red')
    #   orig_im.save(out_path + cropped_jpeg_list[pg_count])
        if args.type == "full":
            im.show()
        crop = [int(x / scale) for x in crop]  # upscale to the original image size
        text_im = orig_im.crop(crop)
        w_original, h_original = orig_im.size
        w_original_half = w_original/2
        w_cropped, h_cropped = text_im.size
        if w_cropped < w_original_half:
            text_im = orig_im
            print ("More than half the page was cropped width-wise. Defaulting to original uncropped image.")
        # Converting to np array to calculate number of channels in jpg. Some directories are single channel jpgs
        open_cv_image = np.array(text_im)
        if open_cv_image.ndim == 2:
            channels = 0
        else:
            channels = open_cv_image.shape[2]
        print(channels)

    #    text_im.save(out_path + cropped_jpeg_list[pg_count])
    #    print('%s -> %s' % (path, out_path))

        # Deskew image
        direct_wo_saving = ""
        try:
            direct_wo_saving = "Y"
            # Convert RGB to BGR
            if channels != 0:
                open_cv_image = open_cv_image[:, :, ::-1].copy()
            deskewed_image = deskew(im=open_cv_image,
                                    save_directory=out_path,
                                    direct=direct_wo_saving)
            pg_count += 1
            print("Pg " + str(pg_count) + " de-skew complete")
        except Exception:  # in-memory deskew failed; fall back to save-and-reload
            direct_wo_saving = "N"
            text_im.save(out_path + cropped_jpeg_list[pg_count])
            cropped_image = cv2.imread(out_path + cropped_jpeg_list[pg_count])
            print("Cropped image saved to, and read from file")
            deskewed_image = deskew(im=cropped_image,
                                    save_directory=out_path,
                                    direct=direct_wo_saving)
            pg_count += 1
Ejemplo n.º 55
0
    def process_rgb_desire_image(self,rgb_image):
        color1 = [0, 0, 255]
        color2 = [255, 0, 0]
        color3 = [0, 255, 0]
        thickness = 5

        """
        region selecting
        row 行
        """
        bottom_left_cols1 = 0.33
        bottom_left_rows1 = 0.15
        top_left_cols1 = 0.33
        top_left_rows1 = 0.08
        bottom_right_cols1 = 0.75
        bottom_right_rows1 = 0.15
        top_right_cols1 = 0.75
        top_right_rows1 = 0.08
        sucker_line_uv = sucker_tile_line()
        uvuv = uv()
        MORPH = 7
        CANNY = 250
        ##################
        rgb=rgb_image

        if rgb_image is not None:


            """'
            Select Blue Desire position
            tile_id=1,fixed point id
            obj_desire="o" object
            """
            YHLS=self.select_yellow(rgb)
            # print "YHLS",YHLS
            Y_gray = self.convert_gray_scale(YHLS)
            Y_smooth = self.apply_smoothing(Y_gray,1)
            Y_edges = self.detect_edges(Y_smooth)
            New_edges = Y_edges.copy()


            Y_kernel = cv2.getStructuringElement( cv2.MORPH_RECT, ( MORPH, MORPH ) )
            Y_closed = cv2.morphologyEx( Y_edges.copy(), cv2.MORPH_CLOSE, Y_kernel )
            _, Y_contours, Y_h = cv2.findContours(Y_closed.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # three-value return assumes OpenCV 3.x
            # print("Y_h", Y_h, Y_contours)
            if len(Y_contours) != 0:
                rgb = self.Draw_triangle(Y_contours, rgb, 'o')
            else:
                print("There is no tile0; you need to put down one blue tile")
                self.pub_empty_uv_info(0, 'o')
            cv2.circle(rgb, (316,251), 10, (20, 100, 220), -2)
            cv2.putText(rgb, 'center', (316,251),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), 1)
            if len(self.obj_buf)!=0:
                cv2.circle(rgb, (int(self.obj_buf[-1][0]), int(self.obj_buf[-1][1])), 10, (100, 100, 220), -2)
                cv2.putText(rgb, 'oc', (int(self.obj_buf[-1][0]), int(self.obj_buf[-1][1])),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), 1)
            if len(self.desire_buf)!=0:
                cv2.circle(rgb, (int(self.desire_buf[-1][0]), int(self.desire_buf[-1][1])), 10, (200, 200, 220), -2)
                cv2.putText(rgb, 'dc', (int(self.desire_buf[-1][0]), int(self.desire_buf[-1][1])),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), 1)
            """
            bottom_left_cols1=0.53
            bottom_left_rows1=0.70
            top_left_cols1=0.53
            top_left_rows1=0.28
            bottom_right_cols1=0.45
            bottom_right_rows1=0.70
            top_right_cols1=0.45
            top_right_rows1=0.28
            """
            # region_up = self.select_region(Y_edges, 0.44, 0.80, 0.44, 0.15, 0.70, 0.80, 0.70, 0.15)
            region_up = self.select_region(Y_edges, bottom_left_cols1, bottom_left_rows1, top_left_cols1, top_left_rows1,bottom_right_cols1, bottom_right_rows1, top_right_cols1, top_right_rows1)
            region_up_line = self.hough_lines(region_up)
            left_line_info, slope_intercept = self.tile_sucker_lines(rgb, region_up_line)

            if left_line_info is not None:
                cv2.line(rgb, left_line_info[0], left_line_info[1], color1, thickness)

                sucker_line_uv.sucker_tile_uv0.uvinfo = [left_line_info[0][0], left_line_info[0][1]]
                sucker_line_uv.sucker_tile_uv1.uvinfo = [left_line_info[1][0], left_line_info[1][1]]

                sucker_line_uv.sucker_tile_slope = slope_intercept[0]
                sucker_line_uv.sucker_tile_intercept = slope_intercept[1]
                self.sucker_line_pub.publish(sucker_line_uv)
                print("slope_intercept --------------", slope_intercept)
            """
            line detect
            """
            cv2.namedWindow( 'region_up', cv2.WINDOW_NORMAL )
            cv2.imshow( 'region_up', region_up )

            # cv2.namedWindow( 'sucker_line', cv2.WINDOW_NORMAL )
            # cv2.imshow( 'sucker_line', region_up_line )
            """
            HLS SPACE
            """
            HLSDOUBLE=self.convert_hls(rgb)
            cv2.namedWindow( 'HLSDOUBLE_Space', cv2.WINDOW_NORMAL )
            cv2.imshow( 'HLSDOUBLE_Space', HLSDOUBLE )

            cv2.namedWindow( 'Yellow_HLS_Space', cv2.WINDOW_NORMAL )
            cv2.imshow( 'Yellow_HLS_Space', YHLS )

            cv2.namedWindow( 'Yellow_tile_edges', cv2.WINDOW_NORMAL )
            cv2.imshow( 'Yellow_tile_edges', Y_edges )

            cv2.namedWindow( 'tile_pixel_frame', cv2.WINDOW_NORMAL )
            cv2.imshow( 'tile_pixel_frame', rgb )

            cv2.waitKey(8)

            # Convert the OpenCV image back into a ROS image message and publish it
            try:
                self.image_pub.publish(self.bridge.cv2_to_imgmsg(rgb_image, "bgr8"))
            except CvBridgeError as e:
                print(e)
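# tile_sucker_lines above is class-specific and not shown in this example. As a
# rough sketch (an assumption, not the original helper), HoughLinesP segments
# are commonly reduced to one (slope, intercept) pair by length-weighted
# averaging:
import numpy as np

def average_slope_intercept(segments):
    params, weights = [], []
    for x1, y1, x2, y2 in (seg[0] for seg in segments):
        if x2 == x1:
            continue  # vertical segment: slope undefined
        slope = (y2 - y1) / float(x2 - x1)
        intercept = y1 - slope * x1
        params.append((slope, intercept))
        weights.append(np.hypot(x2 - x1, y2 - y1))  # weight by segment length
    if not params:
        return None
    return tuple(np.average(params, axis=0, weights=weights))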
Ejemplo n.º 56
0
def green_light_detection(img_in, img, radii_range):

    green_low = (80, 100, 100)
    green_high = (90, 255, 255)

    img_draw = np.copy(img_in)
    img_copy = np.copy(img_in)
    img_hsv = cv2.cvtColor(img_in, cv2.COLOR_BGR2HSV)
    green_mask = cv2.inRange(img_hsv, np.array(green_low, dtype="uint8"),
                             np.array(green_high, dtype="uint8"))

    images_path = "output/test/lights_green/{}".format(img[1][0:])

    # [0] assumes the two-value OpenCV 2.x/4.x return signature; on OpenCV 3.x
    # the contours are the second element of the returned tuple.
    contours = cv2.findContours(green_mask.copy(), cv2.RETR_LIST,
                                cv2.CHAIN_APPROX_SIMPLE)[0]
    # print(len(contours))

    roi_list = []
    pos_list = []

    con_id = 0
    if contours is not None:
        for c in contours:
            con_id += 1
            con_area = cv2.contourArea(c)
            if 400 > con_area > 20:

                cv2.drawContours(img_draw, [c], -1, (0, 255, 0), 1)
                x, y, w, h = cv2.boundingRect(c)
                cv2.rectangle(img_draw, (x, y), (x + w, y + h), (0, 255, 255),
                              1)
                #print w*h

                roi_green = green_mask[y:y + h, x:x + w]  # renamed from roi_red: this ROI comes from the green mask
                num_white_pixel = np.sum(roi_green) / 255
                num_total_pixel = w * h
                percentage = float(num_white_pixel) / num_total_pixel
                ratio = float(h) / w
                # print ratio

                # print "contour{}: Area:{},white_pixels{},percentage{}".format(con_id, con_area,num_white_pixel, percentage)

                if percentage > 0.1 and 0.5 < ratio < 2:
                    cv2.rectangle(img_draw, (x, y), (x + w, y + h),
                                  (0, 0, 255), 1)
                    # roi = img_copy[y: y + h, x:x + w]
                    # size = max(w, h)
                    y1 = max(y - 2 * h, 1)
                    roi_green_light = img_copy[y1:y + h, x:x + w]
                    roi_list.append(roi_green_light)
                    pos_list.append((x, y1, w, 3 * h))

                # pos_list.sort(key = lambda x:x[2]*x[3], reverse=True)

    #cv2.imwrite("output/test/lights_green/{}_green_mask.png".format(img[1][0:]), green_mask)
    #cv2.imwrite("output/test/lights_green/{}_box.png".format(img[1][0:]), img_draw)

    roi_id = 0

    #if not os.path.exists(images_path):
    # print roi_id
    #os.makedirs(images_path)
    # print "There are {} ROI".format(len(roi_list))

    for roi in roi_list:
        roi_id += 1
        #print roi.shape
        #cv2.imwrite(os.path.join(images_path, "green_roi_{}.png".format(roi_id)), roi)

    return roi_list, pos_list
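# The [0] indexing above assumes the two-value findContours return. A small
# version-agnostic wrapper (a sketch; imutils.grab_contours does the same)
# works across OpenCV 2.x, 3.x and 4.x:
def grab_contours(ret):
    if len(ret) == 2:   # OpenCV 2.x / 4.x: (contours, hierarchy)
        return ret[0]
    if len(ret) == 3:   # OpenCV 3.x: (image, contours, hierarchy)
        return ret[1]
    raise ValueError("Unexpected cv2.findContours return value")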
Ejemplo n.º 57
0
    imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # convert BGR to HSV
    # HSV expresses a color as hue, saturation and value (brightness)

    mask = cv2.inRange(imgHSV, lowerBound, upperBound)
    # pixels whose HSV value lies between lowerBound and upperBound become 255
    # (white); all other pixels become 0

    maskOpen = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernelOpen)
    maskClose = cv2.morphologyEx(maskOpen, cv2.MORPH_CLOSE, kernelClose)
    # morphology filtering: apply a structuring element of a predefined shape
    # to the image to produce a new image (opening removes speckle noise,
    # closing fills small holes)

    maskFinal = maskClose
    conts, h = cv2.findContours(maskFinal.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_NONE)  # two-value return assumes OpenCV 2.x/4.x
    # cv2.findContours(image, mode, method)

    # mode – how contours are retrieved
    ## cv2.RETR_EXTERNAL : retrieves only the outermost contour lines.
    ## cv2.RETR_LIST : retrieves all contour lines but builds no hierarchy.
    ## cv2.RETR_CCOMP : retrieves all contour lines with a 2-level hierarchy.
    ## cv2.RETR_TREE : retrieves all contour lines with the full hierarchy.

    # method – the approximation used when storing contour points
    ## cv2.CHAIN_APPROX_NONE : stores every contour point.
    ## cv2.CHAIN_APPROX_SIMPLE : stores only the points needed to draw the
    ##                           contour (e.g. 4 points for a rectangle).
    ## cv2.CHAIN_APPROX_TC89_L1 : Teh-Chin chain approximation algorithm.
    ## cv2.CHAIN_APPROX_TC89_KCOS : Teh-Chin chain approximation algorithm.

    if (len(conts) == 2):
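# A quick, self-contained check of the mode/method notes above (not part of
# the original example): CHAIN_APPROX_NONE keeps every boundary pixel, while
# CHAIN_APPROX_SIMPLE keeps only the corner points of a rectangle.
import cv2
import numpy as np

canvas = np.zeros((100, 100), dtype=np.uint8)
cv2.rectangle(canvas, (20, 20), (80, 80), 255, -1)  # filled white square
for method in (cv2.CHAIN_APPROX_NONE, cv2.CHAIN_APPROX_SIMPLE):
    ret = cv2.findContours(canvas.copy(), cv2.RETR_EXTERNAL, method)
    cnts = ret[0] if len(ret) == 2 else ret[1]  # OpenCV 2/4 vs 3 return shapes
    print(len(cnts[0]))  # ~240 points for NONE, 4 for SIMPLE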
Ejemplo n.º 58
0
    up_h = cv2.getTrackbarPos('up_h', 'Trackbar')
    up_s = cv2.getTrackbarPos('up_s', 'Trackbar')
    up_v = cv2.getTrackbarPos('up_v', 'Trackbar')
    # lower_hsvRange = np.array([low_h,low_s,low_v])
    # upper_hsvRange = np.array([up_h,up_s,up_v])

    lower_hsvRange = np.array([40, 58, 129])   # hard-coded green range; the trackbar reads above are currently unused
    upper_hsvRange = np.array([77, 255, 255])
    hsv = cv2.inRange(hsv, lower_hsvRange, upper_hsvRange)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

    hsv = cv2.morphologyEx(hsv, cv2.MORPH_CLOSE, kernel)
    hsv = cv2.medianBlur(hsv, 5)
    blurred = cv2.GaussianBlur(hsv, (5, 5), 0)
    contours, hierarchy = cv2.findContours(blurred, cv2.RETR_LIST,
                                           cv2.CHAIN_APPROX_NONE)
    try:
        c = max(contours, key=cv2.contourArea)
        cv2.drawContours(picture2, c, -1, (0, 255, 0), 3)  # draw only the largest contour
        bitwise_and = cv2.bitwise_and(picture2, picture2, mask=hsv)
    except ValueError:  # max() raises ValueError when no contours were found
        pass
    else:
        rect = cv2.minAreaRect(c)
        box = cv2.boxPoints(rect)
        box = np.intp(box)  # np.int0 was an alias of np.intp and was removed in NumPy 2.0
        picture = cv2.drawContours(picture, [box], 0, (0, 0, 255), 2)

        KNOWN_DISTANCE = 450
        KNOWN_WIDTH = 100
        flat_rect = rect[1]
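# KNOWN_DISTANCE / KNOWN_WIDTH suggest the usual triangle-similarity distance
# estimate; a hedged sketch of how such constants are typically combined with
# the detected box width from flat_rect (the rest of the example is truncated):
def focal_length(pixel_width, known_distance, known_width):
    # calibrated once from a reference image taken at a known distance
    return (pixel_width * known_distance) / known_width

def distance_to_camera(known_width, focal, pixel_width):
    # similar triangles: distance = (real width * focal) / apparent width
    return (known_width * focal) / pixel_width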
Ejemplo n.º 59
0
debug = []

image_path = args.image
image = cv2.imread(image_path)
debug.append(("input", image))

gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # cv2.imread returns BGR, so BGR2GRAY is the correct conversion
debug.append(("gray", gray))
ret, thresh = cv2.threshold(gray, 79, 255, cv2.THRESH_BINARY_INV)
# debug.append(("thresh", thresh))

# thresh = thresh < 50
# crop = thresh[np.ix_(thresh.any(1),thresh.any(0))]

_, crop_contour, _ = cv2.findContours(thresh, cv2.RETR_TREE,
                                      cv2.CHAIN_APPROX_SIMPLE)  # three-value return assumes OpenCV 3.x

# img_contour = image.copy()
# cv2.drawContours(img_contour, crop_contour, 0 , (0,255,0), 2)
debug.append(("thresh", thresh))

cnt = crop_contour[0]
x, y, w, h = cv2.boundingRect(cnt)
crop = thresh[y:y + h, x:x + w]
debug.append(("crop", crop))

kernel = np.ones((5, 5), np.uint8)
erosion = cv2.erode(crop, kernel, iterations=1)
debug.append(("erosion", erosion))

_, circles, _ = cv2.findContours(erosion, cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)  # approximation mode assumed to match the earlier call
Ejemplo n.º 60
0
    def process_rgb_object_image(self,rgb_image):

        MORPH = 7
        CANNY = 250
        ##################
        rgb=rgb_image

        if rgb_image is not None:


            """'
            Select Blue Desire position
            tile_id=1,fixed point id
            obj_desire="o" object
            """
            YHLS=self.select_yellow(rgb)
            # print "YHLS",YHLS
            Y_gray = self.convert_gray_scale(YHLS)
            Y_smooth = self.apply_smoothing(Y_gray,1)
            Y_edges = self.detect_edges(Y_smooth)
            Y_kernel = cv2.getStructuringElement( cv2.MORPH_RECT, ( MORPH, MORPH ) )
            Y_closed = cv2.morphologyEx( Y_edges.copy(), cv2.MORPH_CLOSE, Y_kernel )
            _, Y_contours, Y_h = cv2.findContours(Y_closed.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # three-value return assumes OpenCV 3.x
            # print("Y_h", Y_h, Y_contours)
            if len(Y_contours) != 0:
                rgb = self.Draw_triangle(Y_contours, rgb, 'o')
            else:
                print("There is no tile0; you need to put down one blue tile")
                self.pub_empty_uv_info(0, 'o')
            cv2.circle(rgb, (316,251), 10, (20, 100, 220), -2)
            cv2.putText(rgb, 'center', (316,251),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), 1)
            if len(self.obj_buf)!=0:
                cv2.circle(rgb, (int(self.obj_buf[-1][0]), int(self.obj_buf[-1][1])), 10, (100, 100, 220), -2)
                cv2.putText(rgb, 'oc', (int(self.obj_buf[-1][0]), int(self.obj_buf[-1][1])),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), 1)
            if len(self.desire_buf)!=0:
                cv2.circle(rgb, (int(self.desire_buf[-1][0]), int(self.desire_buf[-1][1])), 10, (200, 200, 220), -2)
                cv2.putText(rgb, 'dc', (int(self.desire_buf[-1][0]), int(self.desire_buf[-1][1])),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), 1)
            """
            HLS SPACE
            """
            HLSDOUBLE=self.convert_hls(rgb)
            cv2.namedWindow( 'HLSDOUBLE_Space', cv2.WINDOW_NORMAL )
            cv2.imshow( 'HLSDOUBLE_Space', HLSDOUBLE )

            cv2.namedWindow( 'Yellow_HLS_Space', cv2.WINDOW_NORMAL )
            cv2.imshow( 'Yellow_HLS_Space', YHLS )

            cv2.namedWindow( 'Yellow_tile_edges', cv2.WINDOW_NORMAL )
            cv2.imshow( 'Yellow_tile_edges', Y_edges )

            cv2.namedWindow( 'tile_pixel_frame', cv2.WINDOW_NORMAL )
            cv2.imshow( 'tile_pixel_frame', rgb )

            cv2.waitKey(8)

            # Convert the OpenCV image back into a ROS image message and publish it
            try:
                self.image_pub.publish(self.bridge.cv2_to_imgmsg(rgb_image, "bgr8"))
            except CvBridgeError as e:
                print(e)
        # return central_list
        """