Example No. 1
def test_imageSWT():
  filename = 'test/rab_butler.jpg'
  img = cv2.imread(filename,0)
  B,G,R = cv2.split(cv2.imread(filename,1))
  img_color = cv2.merge((R,G,B))
  swt_pos = swt.strokeWidthTransform(img, 1)
  swt_pos_dilated = 255 - cv2.dilate(255 - swt_pos, kernel = np.ones((2,2),np.uint8), iterations = 2)
  swt_neg = swt.strokeWidthTransform(img, -1)
  swt_neg_dilated = 255 - cv2.dilate(255 - swt_neg, kernel = np.ones((2,2),np.uint8), iterations = 2)

  plt.subplot(3,2,1)
  plt.imshow(img_color, interpolation="none")
  plt.title('original image')

  plt.subplot(3,2,3)
  plt.imshow(swt_pos, cmap="gray", interpolation="none")
  plt.title('positive swt of image')
  plt.subplot(3,2,4)
  plt.imshow(swt_pos_dilated, cmap="gray", interpolation="none")
  plt.title('dilated positive swt of image')

  plt.subplot(3,2,5)
  plt.title('negative swt of image')
  plt.imshow(swt_neg, cmap="gray", interpolation="none")
  plt.subplot(3,2,6)
  plt.title('dilated negative swt of image')
  plt.imshow(swt_neg_dilated, cmap="gray", interpolation="none")

  plt.show()
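A note on the 255 - dilate(255 - x) pattern above: with OpenCV's flat kernels this is the dual of erosion, so each dilated SWT could equivalently be computed with cv2.erode. A minimal sketch of the identity (my reading of OpenCV's conventions; worth verifying on your build):

import cv2
import numpy as np

x = np.random.randint(0, 256, (64, 64), np.uint8)
kernel = np.ones((2, 2), np.uint8)
a = 255 - cv2.dilate(255 - x, kernel, iterations=2)
b = cv2.erode(x, kernel, iterations=2)
print(np.array_equal(a, b))  # expected True by erosion/dilation duality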
Example No. 2
def drawCorners(img):
    min_dilations = 0
    max_dilations = 7
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    for k in range(0, 6):
        for dilations in range(min_dilations, max_dilations):
    
            #cv2.adaptiveThreshold(img, thresh_img, 255,
            #        cv2.CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, block_size, (k/2)*5)
    
            #if dilations > 0:
            #    thresh_img = cv2.dilate(thresh_img, 0, dilations - 1)

            mean = cv2.mean(gray)[0]
            thresh_level = int(mean - 10)
            thresh_level = max(thresh_level, 10)

            retval, thresh_img = cv2.threshold(gray, thresh_level, 255, cv2.THRESH_BINARY)
            cv2.dilate(thresh_img, None, thresh_img, (-1,-1), dilations)

            rows = len(thresh_img)
            cols = len(thresh_img[0])
                    
            cv2.rectangle(thresh_img, (0, 0), (cols-1, rows-1), (255, 255, 255), 3, 8)
            
            cv2.imshow("drawCorners: thresh_img " + str(k*0) + str(dilations), thresh_img)
            cv2.waitKey(1)
Example No. 3
    def dilate(self, size=(5, 5), iterations=1, binary_in=False):
        """
        morphological dilate
        Parameters
        ----------
        size : 'tuple'
            kernel size
        iterations : 'int'
            number of times to run kernel
        binary_in : 'bool'
            run on binary

        Returns
        -------
        None
            the result is written back into 'self.img_b' (if binary_in)
            or 'self.img' in place

        """
        kernel = np.ones(size, np.uint8)

        if binary_in:
            dilation = cv2.dilate(self.img_b, kernel, iterations=iterations)
            self.img_b = dilation
        else:
            dilation = cv2.dilate(self.img, kernel, iterations=iterations)
            self.img = dilation
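A hedged usage sketch for the method above; the wrapper class holding the img / img_b arrays is not shown in the example, so ImageProcessor here is hypothetical:

proc = ImageProcessor('scan.png')   # hypothetical wrapper exposing img / img_b
proc.dilate(size=(3, 3), iterations=2, binary_in=True)
cv2.imwrite('dilated.png', proc.img_b)  # the method updates the array in place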
Example No. 4
    def get_contours(self, frame, crop, adjustments, o_type=None):
        """
        Adjust the given frame based on 'min', 'max', 'contrast' and 'blur'
        keys in adjustments dictionary.
        """
        try:
            if o_type == 'BALL':
                frame = frame[crop[2]:crop[3], crop[0]:crop[1]]
            if frame is None:
                return None
            if adjustments['blur'] >= 1:
                blur = self.oddify(adjustments['blur'])
                # print adjustments['blur']

                frame =  cv2.GaussianBlur(frame, (blur, blur), 0)
                # plt.imshow(frame)
                # plt.show()

            if adjustments['contrast'] >= 1.0:
                frame = cv2.add(frame,
                                np.array([float(adjustments['contrast'])]))

            # Convert frame to HSV
            frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

            # Create a mask
            frame_mask = cv2.inRange(frame_hsv,
                                     adjustments['min'],
                                     adjustments['max'])
            
            # Morphological cleanup before finding contours
            if adjustments['open'] >= 1:
                kernel = np.ones((2,2),np.uint8)
                frame_mask = cv2.morphologyEx(frame_mask,
                                              cv2.MORPH_OPEN,
                                              kernel,
                                              iterations=adjustments['open'])

            if adjustments['close'] >= 1:
                kernel = np.ones((2,2),np.uint8)
                frame_mask = cv2.dilate(frame_mask,
                                        kernel,
                                        iterations=adjustments['close'])

            if adjustments['erode'] >= 1:
                kernel = np.ones((2,2),np.uint8)
                frame_mask = cv2.erode(frame_mask,
                                       kernel,
                                       iterations=adjustments['erode'])
            
            contours, hierarchy = cv2.findContours(
                frame_mask,
                cv2.RETR_TREE,
                cv2.CHAIN_APPROX_SIMPLE
            )

            return (contours, hierarchy, frame_mask)
        except:
            raise 
Example No. 5
  def classifyFrame(self):
    if not self.depthRosImg or not self.rgbRosImg:
      return None

    origImg = or_util.toCvImg(self.rgbRosImg, 'bgr8')
    depthImg = or_util.toCvImg(self.depthRosImg, 'passthrough')

    denoised = cv2.GaussianBlur(origImg, (2*self.blurSize+1, 2*self.blurSize+1), self.sigmaX / 10.)

    structuringElement = img_manip.getStructuringElement(cv2.MORPH_ELLIPSE, self.morphSize)
    morphed = denoised
    morphed = cv2.dilate(morphed, structuringElement)
    morphed = cv2.erode(morphed, structuringElement)
    morphed = cv2.erode(morphed, structuringElement)
    morphed = cv2.dilate(morphed, structuringElement)

    hsvImg = cv2.cvtColor(morphed, cv2.COLOR_BGR2HSV)
    detector = cv2.SimpleBlobDetector(self.blobParams)
    keypoints = detector.detect(hsvImg)

    objects = []
    for kp in keypoints:
      classes = self.classifyOne(depthImg, origImg, hsvImg, kp)
      objects.append(classes)

    self.tracker.feed(keypoints, map(lambda obj: OBJECTS_DICT[obj], objects))
    trackerKeypoints, trackerVotes = self.tracker.voteAll()
    trackerObjects = map(lambda vote: OBJECTS_LIST[vote], trackerVotes)

    out = drawStuff(morphed, trackerKeypoints, trackerObjects)
    if self.show:
      cv2.imshow(WINDOW_NAME, out)
      cv2.waitKey(1)

    return trackerObjects, out
Example No. 6
    def select_lines(self):
        image_gray = img_fun.image_gray(self.image)
        image_bin = cv2.adaptiveThreshold(image_gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 33, 5)

        horizontalsize = 50
        horizontalStructure = cv2.getStructuringElement(cv2.MORPH_RECT, (horizontalsize, 1))
        image_bin = cv2.erode(image_bin, horizontalStructure, iterations=1)
        image_bin = cv2.dilate(image_bin,horizontalStructure, iterations=1)

        # image_bin = cv2.dilate(image_bin,np.ones((2,500)), iterations=1)
        image_bin = cv2.dilate(image_bin,np.ones((2,200)), iterations=1)


        img,reg,pos = self.select_horizontal_lines(self.image.copy(),image_bin)

        lines,groups = npt.add_additional_lines(pos)

        self.lines = lines
        self.groups = groups

        # cv2.imshow('lines',cv2.resize(image_bin, (1000, 750), interpolation=cv2.INTER_NEAREST))
        # cv2.waitKey(0)
        # cv2.imshow('aaaaaaaaaa',img)

        img_fun.show_image('bin', image_bin)
        img_fun.show_image('img', img)
Example No. 7
def colorImage(originalImage, imageOne, imageTwo):
    
    #Dilate the first image
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
    imageOneDilated= cv2.dilate(imageOne, kernel)
    imageOne = imageOneDilated - imageOne

    #Dilate the second Image
    imageTwoDilated = cv2.dilate(imageTwo, kernel)
    imageTwo = imageTwoDilated - imageTwo

    #Get all the borders in both images by getting the white points
    imageOneBorders = np.where(imageOne != 0)
    imageOneBorders = zip(imageOneBorders[0], imageOneBorders[1])
    
    imageTwoBorders = np.where(imageTwo != 0)
    imageTwoBorders = zip(imageTwoBorders[0], imageTwoBorders[1])

    originalImage = cv2.cvtColor(originalImage, cv2.COLOR_GRAY2RGB)
    
    #Color the borders of small coins red
    for i in imageOneBorders:
        originalImage.itemset((i[0], i[1], 0), 0)
        originalImage.itemset((i[0], i[1], 1), 0)
        originalImage.itemset((i[0], i[1], 2), 255)

    #Color the borders of the large coins blue
    for i in imageTwoBorders:
        originalImage.itemset((i[0], i[1], 0), 255)
        originalImage.itemset((i[0], i[1], 1), 0)
        originalImage.itemset((i[0], i[1], 2), 0)
        
    return originalImage
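The dilate-then-subtract step above computes the outer (half) morphological gradient, i.e. the ring of pixels just outside each shape. A minimal sketch of its relationship to OpenCV's built-in gradient, on an assumed synthetic image:

import cv2
import numpy as np

img = np.zeros((64, 64), np.uint8)
cv2.circle(img, (32, 32), 20, 255, -1)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
outer = cv2.dilate(img, kernel) - img                      # border just outside the shape
full = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)   # dilate minus erode, both sides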
Example No. 8
    def mouseDoubleClickEvent(self, event):
        frame = self.bridge.imgmsg_to_cv(self.image, 'bgr8')
        cv_image = np.array(frame, dtype=np.uint8)        
        unclosed_gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        kernel = np.array([[1, 1, 1],[1, 1, 1],[1, 1, 1]], 'uint8')
        kernel7 = np.ones((7,7),'uint8')
        gray = cv2.dilate(cv2.erode(unclosed_gray, kernel7), kernel7)
        edges = cv2.Canny(gray,50,150,apertureSize = 3)

        # kernel = np.ones((3,3),'uint8')
        dilated_edges = cv2.dilate(edges, kernel)

        minLineLength = 140
        maxLineGap = 10
        lines = cv2.HoughLinesP(dilated_edges, 1, np.pi/180, 300,
                                minLineLength=minLineLength, maxLineGap=maxLineGap)
        if lines is not None:
            for x1,y1,x2,y2 in lines[0]:
                cv2.line(cv_image,(x1,y1),(x2,y2),(0,255,0),2)
        frame = cv.fromarray(cv_image)
        edges = cv.fromarray(edges)
        dilated_edges = cv.fromarray(dilated_edges)
        cv.ShowImage("im window", frame)
        cv.ShowImage("im window2", edges)
        cv.ShowImage("im window3", dilated_edges)
        cv.MoveWindow("im window2", 820, 60)
        cv.MoveWindow("im window3", 820, 660)
        print "Yoda"
Example No. 9
    def find_position ( self, frame ):
        pos_x = self.last_x
        pos_y = self.last_y

        hsv_img = cv2.cvtColor( frame, cv2.COLOR_BGR2HSV )
        mask1 = cv2.inRange( hsv_img, self.lower_red_l, self.lower_red_h )
        mask2 = cv2.inRange( hsv_img, self.upper_red_l, self.upper_red_h )
       
        mask = mask1 + mask2       

        mask = cv2.erode( mask, self.kernel )
        mask = cv2.dilate( mask, self.kernel )

        mask = cv2.dilate( mask, self.kernel ) 
        mask = cv2.erode( mask, self.kernel )

        o_moments = cv2.moments( mask )
        d_m01 = o_moments['m01']
        d_m10 = o_moments['m10']
        d_area = o_moments['m00']
        print "MOMENTS " + str(d_m01) + " " + str(d_m10) + " " + str(d_area)

        if d_area > 10000:
            pos_x = int(d_m10 / d_area)
            pos_y = int(d_m01 / d_area)
            print "[" + str(pos_x) + " , " + str(pos_y) + "]"
        return pos_x, pos_y 
Example No. 10
    def process(self, image):
        foreground = getfg(image)
        element = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        cv2.erode(foreground, element, foreground, iterations=1)
        cv2.dilate(foreground, element, foreground, iterations=1)

        return foreground
Example No. 11
def find_boxes(img):
    """
    Detects box(square) shapes in the input image.
    :param img: input image.
    :return: image with outlines of boxes from the original image.
    """
    kernel_length = np.array(img).shape[1] // 75
    verticle_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, kernel_length))
    hori_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_length, 1))
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))

    # Detect vertical and horizontal lines in the image
    img_temp1 = cv2.erode(img, verticle_kernel, iterations=2)
    verticle_lines_img = cv2.dilate(img_temp1, verticle_kernel, iterations=2)
    img_temp2 = cv2.erode(img, hori_kernel, iterations=2)
    horizontal_lines_img = cv2.dilate(img_temp2, hori_kernel, iterations=2)

    # Weighting parameters, this will decide the quantity of an image to be added to make a new image.
    alpha = 0.5
    beta = 1.0 - alpha
    # Add the vertical and horizontal lines images to get a third image as summation.
    img_final_bin = cv2.addWeighted(verticle_lines_img, alpha, horizontal_lines_img, beta, 0.0)
    img_final_bin = cv2.erode(~img_final_bin, kernel, iterations=2)
    (_, img_final_bin) = cv2.threshold(img_final_bin, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    return img_final_bin
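A hedged usage sketch for find_boxes: it appears to expect a binary image with lines shown white on black, so the input is inverted here (path and threshold are placeholders):

import cv2

gray = cv2.imread('form.png', 0)   # placeholder path
_, binary = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
boxes = find_boxes(binary)
contours = cv2.findContours(boxes, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]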
Example No. 12
    def get_corners(self, img):
        """ img: np.array, -> np.array

        This function will find corners. It does:
            dilate with cross
            erode with diamond
            dilate with X
            erode with square
            Corners are obtained by differentiating the two closed images
        """
        img_1 = cv2.dilate(img, self._kernel_cross)
        print('cross dilate')
        # ava.cv.utl.show_image_wait_2(img_1) # ------------------

        img_1 = cv2.erode(img_1, self._kernel_diamond)
        print('erode diamond')
        # ava.cv.utl.show_image_wait_2(img_1) # ------------------

        img_2 = cv2.dilate(img, self._kernel_x)
        print('x dilate')
        # ava.cv.utl.show_image_wait_2(img_2) # ------------------

        img_2 = cv2.erode(img_2, self._kernel_5x5)
        print('erode square')
        # ava.cv.utl.show_image_wait_2(img_2) # ------------------

        img_1 = cv2.absdiff(img_2,img_1)
        #threshold
        img_1 = self.apply_threshold(img_1)

        return img_1
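The four structuring elements (self._kernel_cross, _kernel_diamond, _kernel_x, _kernel_5x5) are not shown in the example; below is a sketch of the classic 5x5 shapes this kind of corner detector is usually built on (an assumption, not the project's actual values):

import numpy as np

kernel_cross = np.array([[0,0,1,0,0],
                         [0,0,1,0,0],
                         [1,1,1,1,1],
                         [0,0,1,0,0],
                         [0,0,1,0,0]], np.uint8)
kernel_diamond = np.array([[0,0,1,0,0],
                           [0,1,1,1,0],
                           [1,1,1,1,1],
                           [0,1,1,1,0],
                           [0,0,1,0,0]], np.uint8)
kernel_x = np.array([[1,0,0,0,1],
                     [0,1,0,1,0],
                     [0,0,1,0,0],
                     [0,1,0,1,0],
                     [1,0,0,0,1]], np.uint8)
kernel_5x5 = np.ones((5, 5), np.uint8)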
Example No. 13
    def calibrate_hsv(self, img):        
        element = self.element
        result = np.zeros((img.shape[0], img.shape[1]), np.uint8)
        img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        h,s,v = cv2.split(img_hsv)
        d = cv2.inRange(h, np.array([self.current_conf[0]],np.uint8), 
                           np.array([self.current_conf[1]],np.uint8))
        d2 = cv2.inRange(h, np.array([self.current_conf[2]],np.uint8), 
                            np.array([self.current_conf[3]],np.uint8))
        d = cv2.bitwise_or(d, d2)
        d = cv2.erode(d, element)
        d = cv2.dilate(d, element)
        d = cv2.dilate(d, element)
        d = cv2.dilate(d, element)
        d = cv2.dilate(d, element)
        result = d

        res = result.copy()
        contours, hier = cv2.findContours(res, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        self.current_rect_count = len(contours)
        self.rects = self.choose_contour(contours)
        if len(self.rects) > 0:
            self.current_biggest_rect_area = area(self.rects[0])
        else:
            self.current_biggest_rect_area = 0
        middle_roi = get_roi(result, self.middle_rect)
        self.current_middle_non_zero = cv2.countNonZero(middle_roi)

        self.feedback()
Example No. 14
    def motionDetected(self, new_frame):
        frame = self.preprocessInputFrame(new_frame)

        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        gray = cv.GaussianBlur(gray, (11, 11), 0)

        if self.prevPrevFrame is None:
            self.prevPrevFrame = gray
            return False

        if self.prevFrame is None:
            self.prevFrame = gray
            return False

        cv.normalize(gray, gray, 0, 255, cv.NORM_MINMAX)

        frameDiff = self.diffImg(self.prevPrevFrame, self.prevFrame, gray)
        ret1, th1 = cv.threshold(frameDiff, 10, 255, cv.THRESH_BINARY)

        # assign the results back; these cv2 calls return new arrays
        th1 = cv.dilate(th1, None, iterations=15)
        th1 = cv.erode(th1, None, iterations=1)

        delta_count = cv.countNonZero(th1)

        cv.imshow("frame_th1", th1)

        self.prevPrevFrame = self.prevFrame
        self.prevFrame = gray

        ret = delta_count > self.threshold

        if ret:
            self.updateMotionDetectionDts()

        return ret
Example No. 15
def hsv_to_im_mask(im_hsv, hsv_lows, hsv_highs, is_bucket=False, is_arm=False):
    if is_bucket:
        # mask by threshold
        im_mask = cv2.inRange(im_hsv, hsv_lows, hsv_highs)
        im_mask = cv2.medianBlur(im_mask, 7)
        # erode
        im_mask = cv2.erode(im_mask, None, iterations=2)
        # dilate
        im_mask = cv2.dilate(im_mask, None, iterations=3)
    elif is_arm:
        # mask by threshold
        im_mask = cv2.inRange(im_hsv, hsv_lows, hsv_highs)
        im_mask = cv2.medianBlur(im_mask, 9)
        # erode
        # im_mask = cv2.erode(im_mask, None, iterations=2)
        # dilate
        im_mask = cv2.dilate(im_mask, None, iterations=3)
    else:
        # mask by threshold
        im_mask = cv2.inRange(im_hsv, hsv_lows, hsv_highs)
        im_mask = cv2.medianBlur(im_mask, 5)
        # erode
        # im_mask = cv2.erode(im_mask, None, iterations=2)
        # dilate
        im_mask = cv2.dilate(im_mask, None, iterations=3)
    return im_mask
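A hedged usage sketch for hsv_to_im_mask; the HSV bounds below are illustrative values for a saturated orange target, not ones taken from the source project:

import cv2
import numpy as np

frame = cv2.imread('frame.jpg')                 # placeholder path
im_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
hsv_lows = np.array([5, 120, 120], np.uint8)    # assumed bounds, tune per scene
hsv_highs = np.array([20, 255, 255], np.uint8)
mask = hsv_to_im_mask(im_hsv, hsv_lows, hsv_highs, is_bucket=True)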
Example No. 16
    def skin_blobs(self, img, det_face_hsv, face_rect, masked_img):
        """
        Do blob morphology stuff on faces. Perform a mask,
        Then dilate and erode to make them into more coherent blobs.

        :param img: BGR image from webcam
        :param det_face_hsv: hsv image of the face from the previous detection
        :param face_rect: non-normalized dimensions of face rectangle (left, top, cols, rows)
        :param masked_img: skin-masked image to clean up
        :return: 2D array, black and white image of skin blobs
        """

        #open and close
        # kernel size and shape are more art than science
        # using a small kernel to erode noise and a large one to
        # dilate, since there are more false negatives with skin
        # detection than false positives.
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
        kernel_small = kernel & np.transpose(kernel) #symmetry
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (6, 6))
        kernel_big = kernel & np.transpose(kernel) #symmetry
        blob_img = cv2.erode(masked_img, kernel_small)
        blob_img = cv2.dilate(blob_img, kernel_big)
        blob_img = cv2.erode(blob_img, kernel_small)
        blob_img = cv2.dilate(blob_img, kernel_big)
        return blob_img
Example No. 17
    def getPoly(self):
        
        self.image = self.image[300:]  # drop the top 300 rows
        imgray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
        ret,thresh = cv2.threshold(imgray,0,255,0)
        
        #cv2.bitwise_not(thresh)
        height = thresh.shape[0]
        width = thresh.shape[1]
        tempImage = np.copy(thresh)
        fillPoint = None
        for x in range(height - 1, 0, -1):
            currPixel = thresh[x][width // 2]
            if currPixel.all() != 0:
                fillPoint = ((width // 2), x)
                print(fillPoint)
                break
                 break
        dim = (height + 2, width + 2)
        mask = np.zeros(dim, dtype=np.uint8)
        
        #Produces nothing if the fill point is used
        #If (0, 0) is used it fills in noise
        cv2.floodFill(thresh, mask, (0, 0), 255)
        cv2.imshow("filledImage", thresh)

        #removes most noise from the thresholded image
        noiseRemoved = cv2.bitwise_xor(thresh, tempImage)
        
        #Dilates in order to remove more noise
        cv2.dilate(noiseRemoved, np.ones((4,4), dtype=np.uint8), noiseRemoved, (-1, -1), 1)
        
        cv2.imshow("f", noiseRemoved)
Example No. 18
def get_mask_YCbCr(im, mask_YCbCr):
    minH = 80;    maxH = 220 
    minS = 65;    maxS = 220 
    minV = 65;    maxV = 220 

    tmp = cv2.cvtColor(im, cv2.COLOR_BGR2YCrCb) # BGR -> YCrCb
    
    p_src = cv2.split(tmp) # channels of the converted image (use tmp here; for modified HSV, im is fine)
    p_dst = cv2.split(tmp) # channels to modify for the mask

    # ranges for plain HSV (not the modified HSV)
    H = p_src[0]    # 0 to 180
    S = p_src[1]
    V = p_src[2] 

    skin = 255*(minH <= H)*(H <= maxH)*(minS <= S)*(S <= maxS)*(minV <= V)*(V <= maxV)
    p_dst[0] = skin
    p_dst[1] = skin
    p_dst[2] = skin
    
    mask_YCbCr[:,:,0] = p_dst[0]; mask_YCbCr[:,:,1] = p_dst[1]; mask_YCbCr[:,:,2] = p_dst[2];

    # remove small noise; assign the results back, since cv2 returns new arrays
    element = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
    mask_YCbCr[:] = cv2.dilate(np.uint8(mask_YCbCr), element)
    mask_YCbCr[:] = cv2.erode(np.uint8(mask_YCbCr), element)
Example No. 19
 def detectRover(self, argFrame):
     frame    = self.frame
     hsvFrame = self.frame
     thresh   = self.frame[:,:,0]
     rGreen = (38,67,155,198,0,255)
     rPink = (165,182,155,192,0,255)
     hsvFrame  = cv2.cvtColor(self.frame.copy(), cv2.COLOR_BGR2HSV)
     thresh = cv2.inRange(hsvFrame.copy(),np.array([rGreen[0],rGreen[2],rGreen[4]]),np.array([rGreen[1],rGreen[3],rGreen[5]]))
     thresh = cv2.medianBlur(thresh.copy(),5)
     thresh = cv2.erode(thresh.copy(), erodeElem)
     #thresh = cv2.erode(thresh.copy(), erodeElem)
     thresh = cv2.dilate(thresh.copy(), dilateElem)
     thresh = cv2.dilate(thresh.copy(), dilateElem)
     _, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
     if len(contours) != 1:
         return -1
     (x,y,w,h) = cv2.boundingRect(contours[0])
     greenPt = (int((x+x+w)/2),int((y+y+h)/2))
     thresh = cv2.inRange(hsvFrame.copy(),np.array([rPink[0],rPink[2],rPink[4]]),np.array([rPink[1],rPink[3],rPink[5]]))
     thresh = cv2.medianBlur(thresh.copy(),5)
     thresh = cv2.erode(thresh.copy(), erodeElem)
     #thresh = cv2.erode(thresh.copy(), erodeElem)
     thresh = cv2.dilate(thresh.copy(), dilateElem)
     thresh = cv2.dilate(thresh.copy(), dilateElem)
     _, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
     if len(contours) != 1:
         return -1
     (x,y,w,h) = cv2.boundingRect(contours[0])
     pinkPt = (int((x+x+w)/2),int((y+y+h)/2))
     self.roverPos = (int((greenPt[0]+pinkPt[0])/2),int((greenPt[1]+pinkPt[1])/2))
     angle = getAngle(pinkPt[0],pinkPt[1],greenPt[0],greenPt[1])
     self.roverHeading = 360+angle[2]*-1
     return greenPt, pinkPt
Example No. 20
    def _findCorner(self):
        """
        Find the corners by using some morphological operator and then use
        the connectedComponents in order to isolate them.
        """
        # a few morphological operator in order to find corners
        self.img_corner = cv2.dilate(self.img, CheckboxConstant.cross)
        self.img_corner = cv2.erode(self.img_corner, CheckboxConstant.diamond)
        temp = cv2.dilate(self.img, CheckboxConstant.xelem)
        temp = cv2.erode(temp, CheckboxConstant.rect)
        self.img_corner = cv2.absdiff(temp, self.img_corner)

        # threshold
        ret, self.img_corner = cv2.threshold(255-self.img_corner, 190, 255, 0)
        # find the different areas; cv2.connectedComponents returns (count, labels)
        N, temp = cv2.connectedComponents(self.img_corner, connectivity=8)
        # loop over each region except background
        for n in range(1, N):
            # average position of the corner
            index = np.array((0, 0), int)
            count = 0
            for i in range(temp.shape[0]):
                for j in range(temp.shape[1]):
                    if temp[i, j] == n:
                        index += (i, j)
                        count += 1
            if count != 0:
                index = index // count  # integer division; '/=' on an int array fails in Python 3
                self.corners.append((index[0], index[1]))
Example No. 21
def segment_on_dt(img):
    #http://stackoverflow.com/questions/11294859/how-to-define-the-markers-for-watershed-in-opencv
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)    
    _, img_bin = cv2.threshold(img_gray, 0, 255,cv2.THRESH_OTSU)
    img_bin = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN,numpy.ones((3, 3), dtype=int))
    border = cv2.dilate(img_bin, None, iterations=5)
    border = border - cv2.erode(border, None)

    dt = cv2.distanceTransform(img_bin, 2, 3)
    dt = ((dt - dt.min()) / (dt.max() - dt.min()) * 255).astype(numpy.uint8)
    _, dt = cv2.threshold(dt, 180, 255, cv2.THRESH_BINARY)
    lbl, ncc = label(dt)
    lbl = lbl * (255/ncc)
    # Completing the markers now. 
    lbl[border == 255] = 255

    lbl = lbl.astype(numpy.int32)
    cv2.watershed(img, lbl)

    lbl[lbl == -1] = 0
    lbl = lbl.astype(numpy.uint8)
    result = 255 - lbl
    result[result != 255] = 0
    result = cv2.dilate(result, None)
    img[result == 255] = (0, 0, 255)
    return img
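A hedged usage sketch for segment_on_dt, with the imports its body assumes (numpy imported as numpy, and a connected-component labeler such as scipy's):

import cv2
import numpy
from scipy.ndimage import label

img = cv2.imread('cells.png')    # placeholder path
result = segment_on_dt(img)      # watershed boundaries painted red
cv2.imwrite('segmented.png', result)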
Example No. 22
def ErodeTrick(im):
    # open (erode then dilate) to remove speckle noise...
    im = cv2.erode(im, None, iterations=3)
    im = cv2.dilate(im, None, iterations=3)

    # ...then close (dilate then erode) to fill small holes
    im = cv2.dilate(im, None, iterations=3)
    im = cv2.erode(im, None, iterations=3)
    return im
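An equivalent sketch using cv2.morphologyEx, assuming None above means the default 3x3 rectangular kernel (morphologyEx applies the iteration count to each elementary erode/dilate step):

import cv2
import numpy as np

def erode_trick(im):
    kernel = np.ones((3, 3), np.uint8)
    im = cv2.morphologyEx(im, cv2.MORPH_OPEN, kernel, iterations=3)   # erode x3, dilate x3
    im = cv2.morphologyEx(im, cv2.MORPH_CLOSE, kernel, iterations=3)  # dilate x3, erode x3
    return im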
Example No. 23
    def Harris_Corner(self):
        self.threshold = 0.999999999999
        temp_i = self.image_i.copy()
        temp1_i = self.image_i.copy()
        gray_i = cv2.cvtColor(temp_i, cv2.COLOR_BGR2GRAY)
        gray_i = numpy.float32(gray_i)
        dst_i = cv2.cornerHarris(gray_i, 2, 3, 0.025)
        dst_i = cv2.dilate(dst_i, None)
        # Threshold for an optimal value, it may vary depending on the image.
        temp_i[dst_i < 0.01 * dst_i.max()] = [0, 0, 0]
        temp1_i[dst_i > 0.01 * dst_i.max()] = [0, 0, 255]
        hist_i = cv2.calcHist([temp_i], [0], None, [256], [0, 256])
        temp_j = self.image_j.copy()
        temp1_j = self.image_j.copy()
        gray_j = cv2.cvtColor(temp_j, cv2.COLOR_BGR2GRAY)
        gray_j = numpy.float32(gray_j)
        dst_j = cv2.cornerHarris(gray_j, 2, 3, 0.025)
        dst_j = cv2.dilate(dst_j, None)
        # Threshold for an optimal value, it may vary depending on the image.
        temp_j[dst_j < 0.01 * dst_j.max()] = [0, 0, 0]
        temp1_j[dst_j > 0.01 * dst_j.max()] = [0, 0, 255]
        hist_j = cv2.calcHist([temp_j], [0], None, [256], [0, 256])

        self.measure = cv2.compareHist(hist_i, hist_j, cv2.HISTCMP_CORREL)  # cv.CV_COMP_CORREL in OpenCV 2.x
        self.assertGreater(self.measure, self.threshold)

        print(self.measure)
Example No. 24
def foreground(bg_img, raw_img):

	#take the background and subtract somehow from the foreground
	img = raw_img*1
	raw_hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
	bg_hsv = cv2.cvtColor(bg_img,cv2.COLOR_BGR2HSV)
	hmask = cv2.absdiff(raw_hsv[:,:,0], bg_hsv[:,:,0])
	smask = cv2.absdiff(raw_hsv[:,:,1], bg_hsv[:,:,1])
	vmask = cv2.absdiff(raw_hsv[:,:,2], bg_hsv[:,:,2])
	ret,hmask_thresh = cv2.threshold(hmask,1.,1.,cv2.THRESH_BINARY)
	ret,smask_thresh = cv2.threshold(smask,1.,1.,cv2.THRESH_BINARY)
	ret, vmask_thresh = cv2.threshold(vmask,1.,1.,cv2.THRESH_BINARY)
	hsv_mask = np.multiply(hmask_thresh, smask_thresh)
	hsv_mask = np.multiply(hsv_mask, vmask_thresh)
	hsv_mask = cv2.dilate(hsv_mask, np.ones((100, 100), np.uint8))  # kernel must be an array, not a tuple

	###Filter out colors with extreme values and no red for skin###
	# ret, rmask = cv2.threshold(img[:,:,2],40,1., cv2.THRESH_BINARY)
	# ret, r2mask = cv2.threshold(img[:,:,2],235.,1., cv2.THRESH_BINARY_INV)
	# rb_mask = np.multiply(rmask, r2mask)
	# img[:,:,0 ]=	np.multiply(img[:,:,0], rb_mask)
	# img[:,:,1 ]=	np.multiply(img[:,:,1], rb_mask)
	# img[:,:,2 ]=	np.multiply(img[:,:,2], rb_mask)
	# bmask = cv2.absdiff(img[:,:,0], bg_img[:,:,0])
	# gmask = cv2.absdiff(img[:,:,1], bg_img[:,:,1])
	rmask = cv2.absdiff(img[:,:,2], bg_img[:,:,2])
	ret,rmask_thresh = cv2.threshold(rmask,20.,1.,cv2.THRESH_BINARY)


	##Greyscale mask that kinda worked except for bright lighting
	raw_gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
	#raw_gray = cv2.GaussianBlur(raw_gray, (5,5), 2)

	bg_gray = cv2.cvtColor(bg_img,cv2.COLOR_BGR2GRAY)
	#bg_gray = cv2.GaussianBlur(bg_gray, (5,5), 5)

	mask =cv2.absdiff(raw_gray, bg_gray)
	ret,mask = cv2.threshold(mask,15.,1.,cv2.THRESH_BINARY)
	###

	###make changes here
	mask = mask*1.0
	mask = np.multiply(rmask_thresh, mask)
	mask = np.multiply(hsv_mask, mask)
	mask = cv2.dilate(mask, np.ones((100, 100), np.uint8))

	for i in range(4):
		mask = cv2.erode(mask*255, np.ones((50, 50), np.uint8))/255.

	for i in range(5):
		mask = cv2.dilate(mask*255., np.ones((50, 50), np.uint8))/255.

	fg_img = img*1.0
	fg_img[:,:,0 ]=	np.multiply(img[:,:,0], mask)
	fg_img[:,:,1 ]=	np.multiply(img[:,:,1], mask)
	fg_img[:,:,2 ]=	np.multiply(img[:,:,2], mask)

	cv2.imshow("fg_img", np.array(fg_img, dtype= "uint8"))

	return np.array(mask, dtype = "uint8")
Example No. 25
      def GenericFilter(self,infile,outfile):
	      #print infile,":",outfile
	      clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
	      kernel1 = np.ones((5,5),np.uint8)
	      kernel2 = np.ones((3,3),np.uint8)
	      img = cv2.imread(str(infile),0)
	      cv2.imshow("img",img)
	      blur=cv2.medianBlur(img,5)
	      cl1 = clahe.apply(blur)
	      circles_mask = cv2.dilate(cl1,kernel1,iterations = 6)
	      circles_mask = (255-circles_mask)
	      circles_mask = cv2.threshold(circles_mask, 0, 255, cv2.THRESH_BINARY)[1]
	      edges = cv2.Canny(cl1,100,200)
	      dilation = cv2.dilate(edges,kernel1,iterations = 1)
	      display = cv2.bitwise_and(img,img,mask=dilation) 
	      cl2 = clahe.apply(display)
	      cl2 = clahe.apply(cl2)
	      ret,th = cv2.threshold(cl2,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
	      th = 255 - th
	      thg = cv2.adaptiveThreshold(display,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
			                  cv2.THRESH_BINARY,11,2)

	      saveimgfile=outfile.split(".")
	      print(saveimgfile[1])

	      final = cv2.bitwise_and(dilation,dilation,mask=th) 
	      cv2.imwrite("Filtered-images/"+str(saveimgfile[0])+"1."+str(saveimgfile[1]),final)
	      finalg = cv2.bitwise_and(dilation,dilation,mask=thg) 
	      cv2.imwrite("Filtered-images/"+str(saveimgfile[0])+"2."+str(saveimgfile[1]),finalg)
	      finalg = 255 - finalg
	      abso = cv2.bitwise_and(dilation,dilation,mask=finalg) 
	      cv2.imwrite("Filtered-images/"+str(saveimgfile[0])+"orig."+str(saveimgfile[1]),abso)

	      cv2.waitKey(0)
Example No. 26
    def find_red_position ( frame ):
        pos_x = last_x
        pos_y = last_y

        hsv_img = cv2.cvtColor( frame, cv2.COLOR_BGR2HSV )
        red_mask1 = cv2.inRange( hsv_img, lower_red_l, lower_red_h )
        red_mask2 = cv2.inRange( hsv_img, upper_red_l, upper_red_h )
       
        red_mask = red_mask1 + red_mask2       

        red_mask = cv2.erode( red_mask, kernel )
        red_mask = cv2.dilate( red_mask, kernel )

        red_mask = cv2.dilate( red_mask, kernel ) 
        red_mask = cv2.erode( red_mask, kernel )

        o_moments = cv2.moments( red_mask )
        d_m01 = o_moments['m01']
        d_m10 = o_moments['m10']
        d_area = o_moments['m00']
        #print "MOMENTS " + str(d_m01) + " " + str(d_m10) + " " + str(d_area)

        if d_area > 10000:
            pos_x = int(d_m10 / d_area)
            pos_y = int(d_m01 / d_area)
            #print "[" + str(pos_x) + " , " + str(pos_y) + "]"
        return pos_x, pos_y, red_mask
Example No. 27
def sameCardShape(shape1, shape2):
    #val = cv2.matchShapes(shape1, shape2, 1, 0.0)
    #print val
    (maxX1, maxY1) = (max(shape1[:,0,0]), max(shape1[:,0,1]))
    (minX1, minY1) = (min(shape1[:,0,0]), min(shape1[:,0,1]))
    (maxX2, maxY2) = (max(shape2[:,0,0]), max(shape2[:,0,1]))
    (minX2, minY2) = (min(shape2[:,0,0]), min(shape2[:,0,1]))

    maxXDiff = max(maxX1 - minX1, maxX2 - minX2)
    maxYDiff = max(maxY1 - minY1, maxY2 - minY2)

    image1 = np.zeros((maxYDiff,maxXDiff), np.uint8)
    cv2.drawContours(image1, [shape1], 0, 255, offset=(-minX1, -minY1))
    image1 = cv2.dilate(image1, np.ones((17,17), np.uint8))
    if DEBUGSHAPES:
        showImage(image1, 'contour1', wait=False)

    image2 = np.zeros((maxYDiff,maxXDiff), np.uint8)
    cv2.drawContours(image2, [shape2], 0, 255, offset=(-minX2, -minY2))
    image2 = cv2.dilate(image2, np.ones((17,17), np.uint8))
    if DEBUGSHAPES:
        showImage(image2, 'contour2', wait=False)


    intersectImage = cv2.bitwise_and(image1, image2)
    intersectCount = float(cv2.countNonZero(intersectImage))
    count1 = cv2.countNonZero(image1)
    count2 = cv2.countNonZero(image2)
    if DEBUGSHAPES:
        print(str(intersectCount / count1 > shape_similarity_threshold or
                  intersectCount / count2 > shape_similarity_threshold) +
              ' intersect ratio = ' + str(intersectCount / count1) + ', ' + str(intersectCount / count2))
        showImage(intersectImage, 'and')
    
    return intersectCount / min(count1, count2) > shape_similarity_threshold
Example No. 28
    def find_yellow_contours(self, split_image):
        lab_bthreshed_board = cv2.adaptiveThreshold(split_image.lab[2], 255, 
            cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 
            self.options["board_blocksize"], self.options["board_C_b"])
        yuv_uthreshed_board = cv2.adaptiveThreshold(split_image.yuv[2], 255, 
            cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 
            self.options["board_blocksize"], self.options["board_C_u"])
        yuv_uthreshed_board = cv2.bitwise_not(yuv_uthreshed_board)
        finalThreshed = lab_bthreshed_board & yuv_uthreshed_board

        # Erode and dilate thresholded images
        morph_size = self.options["board_morph_size"]
        erode_element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (morph_size * 2 + 1, morph_size * 2 + 1),
                                                  (morph_size, morph_size))
        dilate_element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (morph_size * 2 + 1, morph_size * 2 + 1),
                                                   (morph_size, morph_size))
        eroded = cv2.erode(finalThreshed,erode_element, iterations = self.options["board_morph_iter"])
        finalThreshed = cv2.dilate(eroded, dilate_element, iterations = self.options["board_morph_iter"])
        finalThreshed = cv2.dilate(finalThreshed, dilate_element, iterations = self.options["board_morph_iter"])
        finalThreshed = cv2.erode(finalThreshed, erode_element, iterations = self.options["board_morph_iter"])

        self.post_if_enabled('yellow_lab_bthreshed', lab_bthreshed_board)
        self.post_if_enabled('yellow_yuv_uthreshed', yuv_uthreshed_board)
        self.post_if_enabled('yellow_binary_image', finalThreshed)

        _, contours, hierarchy = cv2.findContours(np.copy(finalThreshed), 
            cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        return contours, hierarchy
Example No. 29
def marcarRectas(img) :

    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

    flag,b = cv2.threshold(gray,100,255,cv2.THRESH_BINARY)

    element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7))
    b = cv2.dilate(b,element)  # keep the result; cv2.dilate does not modify its input

    edges = cv2.Canny(b,100,255)

    lines90 = cv2.HoughLinesP(edges,1, np.pi/180, 100)
    lines180 = cv2.HoughLinesP(edges,1, np.pi, 100)

    if lines90 is not None:
        l = lines90.tolist()
        for x1,y1,x2,y2 in l[0]:
            cv2.line(img, (x1,y1), (x2,y2), (0,255,0), 5)

    #if(lines180!=None):
    #    l = lines180.tolist()
    #    for x1,y1,x2,y2 in l[0]:
    #        cv2.line(img, (x1,y1), (x2,y2), (0,255,0), 3)

    return img
Example No. 30
def preprocess(gray):
    # 1. Sobel operator: take the gradient in the x direction
    sobel = cv2.Sobel(gray, cv2.CV_8U, 1, 0, ksize = 3)
    # 2. Binarize
    ret, binary = cv2.threshold(sobel, 0, 255, cv2.THRESH_OTSU+cv2.THRESH_BINARY)

    # 3. Structuring elements for the dilation and erosion steps
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (30, 9))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (24, 6))

    # 4. Dilate once to make the contours stand out
    dilation = cv2.dilate(binary, element2, iterations = 1)

    # 5. Erode once to remove fine detail such as table lines (this removes the vertical lines)
    erosion = cv2.erode(dilation, element1, iterations = 1)

    # 6. Dilate again to make the contours more visible
    dilation2 = cv2.dilate(erosion, element2, iterations = 3)
    # dilation2 = cv2.dilate(erosion, element1, iterations = 1)

    # 7. Save the intermediate images
    cv2.imwrite("temp/gray.png", gray)
    cv2.imwrite("temp/binary.png", binary)
    cv2.imwrite("temp/dilation.png", dilation)
    cv2.imwrite("temp/erosion.png", erosion)
    cv2.imwrite("temp/dilation2.png", dilation2)

    return dilation2
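A hedged usage sketch for preprocess (the temp/ directory must exist for the intermediate writes; the input path is a placeholder):

import cv2

gray = cv2.imread('plate.jpg', 0)   # placeholder path
region_mask = preprocess(gray)
contours, _ = cv2.findContours(region_mask, cv2.RETR_EXTERNAL,
                               cv2.CHAIN_APPROX_SIMPLE)[-2:]  # text-like region candidates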
Example No. 31
    frame_out = frame.copy()

    # Show the frame number in the top-left corner of the video
    if dict['text_overlay']:
        str_on_frame = "%d/%d" % (video_cur_frame, video_info['num_of_frames'])
        cv2.putText(frame_out, str_on_frame, (5, 30), cv2.FONT_HERSHEY_SIMPLEX,
                    0.8, (0, 255, 255), 2, cv2.LINE_AA)
        cv2.putText(frame_out, global_str + str(round(change_pos, 2)) + 'sec', (5, 60), cv2.FONT_HERSHEY_SIMPLEX,
                    0.8, (255, 0, 0), 2, cv2.LINE_AA)

    # motion detection for all objects
    if dict['motion_detection']:
        fgmask = fgbg.apply(frame_blur)
        bw = np.uint8(fgmask == 255) * 255
        bw = cv2.erode(bw, kernel_erode, iterations=1)
        bw = cv2.dilate(bw, kernel_dilate, iterations=1)
        (_, cnts, _) = cv2.findContours(bw.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # Loop over the contours
        for c in cnts:
            # Skip contours that are too small
            if cv2.contourArea(c) < dict['min_area_motion_contour']:
                continue
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame_out, (x, y), (x + w, y + h), (255, 0, 0), 1)

    # Detect cars and empty parking spots
    if dict['parking_detection']:
        for ind, park in enumerate(parking_data):
            points = np.array(park['points'])
            rect = parking_bounding_rects[ind]
            roi_gray = frame_gray[rect[1]:(rect[1] + rect[3]),
Example No. 32
    def camera_callback(self, data):
        start_time = time.time()
        self.recieved_image = True

        try:
            # We select bgr8 because its the OpenCV encoding by default
            cv_image = self.bridge_object.imgmsg_to_cv2(
                data, desired_encoding="bgr8")

            #resize
            #height, width, channels = cv_image.shape
            #cv_image = cv2.resize(cv_image, (width/10, height/10), interpolation = cv2.INTER_AREA)

            #crop
            height, width, channels = cv_image.shape
            descentre = -140
            rows_to_watch = 200
            #crop_image = cv_image[(height)/2+descentre:(height)/2+(descentre+rows_to_watch)][1:width]
            crop_image = cv_image

            #blur
            #blur = cv2.GaussianBlur(crop_image, (15,15), 0)
            #blur = cv2.bilateralFilter( crop_image, 5, 75 ,75)
            blur = crop_image

            #hsv
            hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)

            #mask
            par = self.myHSV
            lower = np.array([par[0], par[1], par[2]])
            upper = np.array([par[3], par[4], par[5]])
            mask = cv2.inRange(hsv, lower, upper)

            #filters
            mask = cv2.erode(mask, None, iterations=2)
            mask = cv2.dilate(mask, None, iterations=2)

            # find contours

            cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = cnts[1]

            if len(cnts) > 0:
                c = max(cnts, key=cv2.contourArea)
                ((x, y), radius) = cv2.minEnclosingCircle(c)
                if radius > 5:
                    m = cv2.moments(c)
                    cx, cy = m['m10'] / m['m00'], m['m01'] / m['m00']
                    self.target_found = True
                    cv2.circle(crop_image, (int(cx), int(cy)), int(radius),
                               (0, 255, 255), 2)
                    self.object_location = cx - width / 2
                else:
                    self.target_found = False
                    #print 'target too small', radius
            else:
                self.target_found = False

                #cv2.circle(res, (int(cx), int(cy)), 10, (255,0,0), -1)

        except CvBridgeError as e:
            print(e)

        #print(cv2)
        #cv2.imshow("Image window", cv_image)
        #cv2.imshow("Cropped", crop_image)
        #cv2.imshow("HSV", hsv)
        #cv2.imshow("Masked", res)
        cv2.waitKey(1)
        elapsed_time = time.time() - start_time
Example No. 33
# imports assumed by this snippet (the model itself is defined elsewhere)
import copy as cp
import cv2 as cv
import torch

model.load_state_dict(torch.load("path_to_model.pt"))
model.to(torch.device('cuda'))


# cv.namedWindow("Img", cv.WINDOW_AUTOSIZE)
# outVid = cv.VideoWriter('output.avi',cv.VideoWriter_fourcc('M','J','P','G'), 1, (1920,1080))

# Read Image 
img = cv.imread("path_to_image")
imgGray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
__, imgGray = cv.threshold(imgGray, 150, 255, cv.THRESH_BINARY_INV)

# Find Rectangular Region in Image
vSE = cv.getStructuringElement(cv.MORPH_RECT, (1, 20))
vErodeImg = cv.erode(imgGray, vSE)
vDilateImg = cv.dilate(vErodeImg, vSE)

hSE = cv.getStructuringElement(cv.MORPH_RECT, (20, 1))
hErodeImg = cv.erode(imgGray, hSE)
hDilateImg = cv.dilate(hErodeImg, hSE)

binaryImg = vDilateImg + hDilateImg

imgContours = cv.findContours(binaryImg, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)

for cnt in reversed(imgContours[0]):
    
    x,y,w,h = cv.boundingRect(cnt)
    
    area = cv.contourArea(cnt)
    imgDisp = cp.deepcopy(img)
Example No. 34
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# Author: xiao
# Created Time : 2019-05-22
# File Name: erode_dilate.py
# Description:
"""

import cv2
import numpy as np 



if __name__=='__main__':
    imgfn = 'test.jpg'
    img = cv2.imread(imgfn)
    for k in range(3,8):
        kernel = np.ones((k,k),np.uint8)  
        erosion = cv2.erode(img,kernel,iterations = 1)
        dilation = cv2.dilate(img,kernel,iterations = 1)
        closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
        opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
        new_img = np.vstack([erosion, dilation, closing, opening])
        cv2.imshow('newimg_k_{}'.format(k), new_img)
        cv2.waitKey()
Example No. 35
    ret, frame = video_capture.read()
    #Flip the frame to avoid mirroring effect
    frame = cv2.flip(frame,1)
    #Resize the given frame to a 600*600 window
    frame = imutils.resize(frame, width = 600)
    #Blur the frame using Gaussian Filter of kernel size 5, to remove excessive noise
    blurred_frame = cv2.GaussianBlur(frame, (5,5), 0)
    #Convert the frame to HSV, as HSV allow better segmentation.
    hsv_converted_frame = cv2.cvtColor(blurred_frame, cv2.COLOR_BGR2HSV)

    #Create a mask for the frame, showing green values
    mask = cv2.inRange(hsv_converted_frame, greenLower, greenUpper)
    #Erode the masked output to delete small white dots present in the masked image
    mask = cv2.erode(mask, None, iterations = 2)
    #Dilate the resultant image to restore our target
    mask = cv2.dilate(mask, None, iterations = 2)

    #Find all contours in the masked image
    cnts,_ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    #Define center of the ball to be detected as None
    center = None

    #If any object is detected, then only proceed
    if(len(cnts) > 0):
        #Find the contour with maximum area
        c = max(cnts, key = cv2.contourArea)
        #Find the center of the circle, and its radius of the largest detected contour.
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        #Calculate the centroid of the ball, as we need to draw a circle around it.
        M = cv2.moments(c)
Example No. 36
files = sorted(glob.glob(os.path.join(faces_folder_path, "*")))

x_coord = []
y_coord = []
for filename in files:
    img = cv2.imread(filename)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    gray = np.float32(gray)
    dst = cv2.cornerHarris(gray, 2, 3, 0.04)

    a = np.zeros(shape=(240, 352))
    for i in range(0, 240):
        for j in range(0, 352):
            if dst[i][j] > 0.01 * dst.max():
                x_coord.append(i)
                y_coord.append(j)

    #
    # print "count = ",count
    #result is dilated for marking the corners, not important
    dst = cv2.dilate(dst, None)
    # print dst.max(),dst.min()
    cluster(x_coord, y_coord)

    # Threshold for an optimal value, it may vary depending on the image.
    img[dst > 0.01 * dst.max()] = [0, 0, 255]

    cv2.imshow('dst', img)
    if cv2.waitKey(0) & 0xff == 27:
        cv2.destroyAllWindows()
Example No. 37
            if frame is None:
                break
            # Display camera input
            image = frame
            #define the processing reagion
            crop_img = frame[450:700, 450:780]

            # convert to grayscale, gaussian blur, and threshold
            gray = cv.cvtColor(crop_img, cv.COLOR_BGR2GRAY)
            blur = cv.GaussianBlur(gray, (5, 5), 0)
            ret, th = cv.threshold(blur, 60, 255, cv.THRESH_BINARY_INV)
            edged = cv.Canny(blur, 85, 85)

            # Erode to eliminate noise, Dilate to restore eroded parts of image
            mask1 = cv.erode(th, None, iterations=2)
            mask = cv.dilate(mask1, None, iterations=2)

            # Find all contours in frame
            contours, hierarchy = cv.findContours(th.copy(), 1,
                                                  cv.CHAIN_APPROX_NONE)
            # Find x-axis centroid of largest contour and cut power to appropriate motor
            # to recenter camera on centroid.
            if len(contours) > 0:
                c = max(contours, key=cv.contourArea)
                M = cv.moments(c)

                if M["m00"] != 0:
                    cx = int(M['m10'] / M['m00'])
                    cy = int(M['m01'] / M['m00'])
                    print(cx, "value")
Example No. 38
import numpy as np
import cv2



img = cv2.imread('/home/pi/Desktop/myFile', 0)
img = cv2.resize(img, (400, 400))
size = np.size(img)
skel = np.zeros(img.shape, np.uint8)

ret, img = cv2.threshold(img, 127, 255, 0)
element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))

done = False

while not done:
    eroded = cv2.erode(img, element)
    temp = cv2.dilate(eroded, element)
    temp = cv2.subtract(img, temp)
    skel = cv2.bitwise_or(skel, temp)
    img = eroded.copy()

    zeros = size - cv2.countNonZero(img)
    if zeros == size:
        done = True

cv2.imshow("skel", skel)
cv2.waitKey(0)
cv2.destroyAllWindows()
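The loop above is morphological skeletonization: repeatedly erode, subtract the opening, and accumulate the residue. A sketch of the same idea wrapped as a reusable function:

import cv2
import numpy as np

def skeletonize(binary):
    skel = np.zeros_like(binary)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    while cv2.countNonZero(binary):
        eroded = cv2.erode(binary, element)
        opened = cv2.dilate(eroded, element)                  # opening of the current image
        skel = cv2.bitwise_or(skel, cv2.subtract(binary, opened))
        binary = eroded
    return skel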
Example No. 39
low = 90
up = 255

while (1):
    _, image = cam.read()
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gaussian1 = cv2.GaussianBlur(gray, (7, 7), 0)
    #gaussian2 = cv2.medianBlur(gray,5)
    kernal = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
    _, thresh = cv2.threshold(gray, low, up,
                              cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
    #_,thresh = cv2.threshold(gaussian,low,up,cv2.THRESH_BINARY_INV)
    #kernal = cv2.getStructuringElement(cv2.MORPH_RECT,(10,10))
    #kernal = np.ones((5, 5), np.uint8)
    erosion = cv2.erode(thresh, kernal, iterations=1)
    dilate = cv2.dilate(thresh, kernal, iterations=1)
    contours, _ = cv2.findContours(thresh, cv2.RETR_LIST,
                                   cv2.CHAIN_APPROX_SIMPLE)

    for cnt in contours:
        area = cv2.contourArea(cnt)
        approx = cv2.approxPolyDP(cnt, 0.03 * cv2.arcLength(cnt, True), True)
        x = approx.ravel()[0]
        y = approx.ravel()[1]

        if area > 5000:
            cv2.drawContours(image, [approx], -1, (0, 255, 0), 2)
            if (len(approx) == 4):
                x, y, width, height = cv2.boundingRect(approx)
                aspectRatio = width / float(height)
                print(aspectRatio)
Example No. 40
def detectObstacles(hsv_frame, frame, draw):
    A1 = None
    d1 = None
    x = None
    high_black = np.array([85, 70, 40])
    low_black = np.array([0, 0, 0])

    img_binary1 = cv2.inRange(hsv_frame.copy(), low_black, high_black)
    img_binary1 = cv2.dilate(img_binary1, None, iterations=1)

    # Finding Center
    img_contours1 = img_binary1.copy()
    contours1 = cv2.findContours(img_contours1, cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)[-2]

    # Finding largest contour and locating x,y values and width
    obstacle1_d = None
    obstacle1_A = None
    obstacle2_d = None
    obstacle2_A = None
    obstacle3_d = None
    obstacle3_A = None
    w_min = 10
    if len(contours1) > 0:
        obstacles = []
        Contour_size = []
        count = 1
        for i, c in enumerate(contours1):
            area = cv2.contourArea(c)
            obstacles.append(area)
        # Sort area by largest to smallest
        Contour_size = sorted(zip(obstacles, contours1),
                              key=lambda x: x[0],
                              reverse=True)

        if len(Contour_size) > 2:
            obstacle3 = Contour_size[2][1]
            obstacle3_area = cv2.minAreaRect(obstacle3)
            obstacle3_box = cv2.boxPoints(obstacle3_area)
            obstacle3_box = np.int0(obstacle3_box)
            obstacle3_w = abs(obstacle3_box[2] - obstacle3_box[0])
            obstacle3_x, obstacle3_y = abs(obstacle3_box[0])
            obstacle3_x2, obstacle3_y2 = obstacle3_w
            if draw == True:
                if obstacle3_x2 >= w_min:
                    drawobstacle3 = cv2.drawContours(frame, [obstacle3_box], 0,
                                                     (0, 255, 0), 2)
                    obstacle3_d = (15 * 300) / obstacle3_x2
                    obstacle3_A = (((obstacle3_x + (obstacle3_x2 / 2)) /
                                    (320 / 108)) - 54)
                    cv2.putText(frame, "D: " + str(obstacle3_d), (4, 140),
                                cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 255))
                    cv2.putText(frame, "A: " + str(obstacle3_A), (4, 125),
                                cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 255))
                    cv2.putText(frame, "Obstacle3", (4, 110),
                                cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 255))

        if len(Contour_size) > 1:
            obstacle2 = Contour_size[1][1]
            obstacle2_area = cv2.minAreaRect(obstacle2)
            obstacle2_box = cv2.boxPoints(obstacle2_area)
            obstacle2_box = np.int0(obstacle2_box)
            obstacle2_w = abs(obstacle2_box[2] - obstacle2_box[0])
            obstacle2_x, obstacle2_y = abs(obstacle2_box[0])
            obstacle2_x2, obstacle2_y2 = obstacle2_w
            if draw == True:
                if obstacle2_x2 >= w_min:
                    drawobstacle2 = cv2.drawContours(frame, [obstacle2_box], 0,
                                                     (0, 255, 0), 2)
                    obstacle2_d = (15 * 300) / obstacle2_x2
                    obstacle2_A = (((obstacle2_x + (obstacle2_x2 / 2)) /
                                    (320 / 108)) - 54)
                    cv2.putText(frame, "D: " + str(obstacle2_d), (4, 95),
                                cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 255))
                    cv2.putText(frame, "A: " + str(obstacle2_A), (4, 80),
                                cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 255))
                    cv2.putText(frame, "Obstacle2", (4, 65),
                                cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 255))

        square = max(contours1, key=cv2.contourArea)
        area = cv2.minAreaRect(square)
        box = cv2.boxPoints(area)
        box = np.int0(box)
        w = abs(box[2] - box[0])
        xyes, yyes = abs(box[0])
        x, y = w
        if draw == True:
            if x >= w_min:
                draw = cv2.drawContours(frame, [box], 0, (0, 255, 0), 2)
                #Focal Length = (width@10cm * 10cm)/actual width = 255
                obstacle1_d = (15 * 300) / x
                obstacle1_A = (((xyes + (x / 2)) / (320 / 108)) - 54)
                cv2.putText(frame, "D: " + str(obstacle1_d), (4, 50),
                            cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 255))
                cv2.putText(frame, "A: " + str(obstacle1_A), (4, 35),
                            cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 255))
                cv2.putText(frame, "Obstacle", (4, 20), cv2.FONT_HERSHEY_PLAIN,
                            1, (0, 255, 255))

    return [[obstacle1_d, obstacle1_A], [obstacle2_d, obstacle2_A],
            [obstacle3_d, obstacle3_A]]
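The distance estimates above, (15 * 300) / width, follow the pinhole-camera model: distance = real_width x focal_length_px / apparent_pixel_width, apparently calibrated here with a ~15 cm object and a ~300 px focal length (the in-code note describes the calibration: focal length = pixel width at a known distance x that distance / real width). A hedged sketch with those assumed constants:

def estimate_distance_cm(pixel_width, real_width_cm=15.0, focal_px=300.0):
    # pinhole model: D = W * f / w
    return real_width_cm * focal_px / pixel_width

# calibration: f = pixel_width_at_known_distance * known_distance_cm / real_width_cm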
Example No. 41
for r, d, f in os.walk(path):
    tamanhoImagem = len(f)
    for filename in f:
        imagem = cv2.imread(os.path.join(path, filename))
        imagem = cv2.resize(imagem, dim, interpolation=cv2.INTER_AREA)
        imagemBin = 255 - imagem[:,:,0]
        
        novaImagem = np.zeros(np.shape(imagemBin))
        kernel = np.ones((3,3), np.uint8)
        novaImagem = normalizaImagem((imagemBin>100)*1)

        imCopy = np.copy(novaImagem)
        imPlot = np.zeros(np.shape(imagem))
        imPlot[:,:,0] = imPlot[:,:,1] = imPlot[:,:,2] = imCopy

        novaImagem = cv2.dilate(novaImagem, kernel, iterations=1) - novaImagem
        novaImagem = cv2.resize(novaImagem, dim, interpolation=cv2.INTER_AREA)

        max_xy = np.where(novaImagem == 255)
        novaImagemRGB = np.zeros(np.shape(imagem))
        novaImagemRGB[:,:,0] = novaImagemRGB[:,:,1] = novaImagemRGB[:,:,2] = novaImagem

        cv2.circle(novaImagemRGB, (max_xy[1][0], max_xy[0][0]), int(3), (0,0,255), 2)
        iniciarPonto = (max_xy[0][0], max_xy[1][0])
        ponto = verifique(novaImagem, iniciarPonto, 4)

        while(ponto!=iniciarPonto):
            cv2.circle(imPlot, (ponto[1], ponto[0]), int(3), (0, 255, 255), 4)
            ponto = verifique(novaImagem, ponto, 4)
        
        print(ChainCode)
Example No. 42
def find_banana(image, ruta='resultados'):
    #The RGB scheme is inverted to BGR, since the functions are more
    #compatible with blue carrying the most relevance
    # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # cv2.imwrite('resultados/bgr.jpg', image)

    #A fixed size
    #using the largest dimension
    max_dimension = max(image.shape)
    #The output image scale will be no larger than 700px
    scale = 700 / max_dimension
    #Resize the image with the same scale on both axes.
    image = cv2.resize(image, None, fx=scale, fy=scale)

    #Reduce the image noise using a Gaussian filter, at the
    #maximum square scale.
    # image_blur = cv2.bilateralFilter(image,9,75,75)
    image_blur = cv2.GaussianBlur(image, (7, 7), 0)
    cv2.imwrite(ruta + '/blur.jpg', image_blur)

    #Try to focus on color, which is why we use the HSV
    #scheme: it emphasizes hue and handles only saturation and
    #value
    image_blur_hsv = cv2.cvtColor(image_blur, cv2.COLOR_RGB2HSV)
    cv2.imwrite(ruta + '/hsv.jpg', image_blur_hsv)

    #kernel = np.ones((5,5),np.uint8)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    erosion = cv2.erode(image_blur_hsv, kernel, iterations=1)
    cv2.imwrite(ruta + '/erosionado.jpg', erosion)
    dilation = cv2.dilate(image_blur_hsv, kernel, iterations=1)
    cv2.imwrite(ruta + '/dilatado.jpg', dilation)
    dilation_blur = cv2.morphologyEx(dilation, cv2.MORPH_CLOSE, kernel)
    cv2.imwrite(ruta + '/dilatado_blur.jpg', dilation_blur)

    # Filter by color
    # 20-30 hue
    """There is a problem here: we chose a range of yellows, but it is not
    recognized in the image, and this range is hard to tune since it
    sometimes picks up colors it should not. By hue, yellows run roughly
    from 50 to 70."""
    #hsv(15, 80, 50)
    #hsv(105, 120, 255)
    min_yellow = np.array([15, 100, 80])
    max_yellow = np.array([105, 255, 255])
    # min_yellow = np.array([20, 100, 80])
    # max_yellow = np.array([30, 255, 255])
    #layer
    mask1 = cv2.inRange(dilation_blur, min_yellow, max_yellow)

    #hsv(230, 0, 0)
    #hsv(270, 255, 255)
    black_min = np.array([130, 0, 0])
    black_max = np.array([170, 255, 255])
    black_mask = cv2.inRange(dilation_blur, black_min, black_max)
    cv2.imwrite(ruta + '/mascara_negro.jpg', black_mask)

    #Filter by brightness
    # 170-180 hue
    #Try to emphasize the brightness for better color
    #recognition.
    #hsv(170,100,80)
    #hsv(180,255,255)
    min_yellow2 = np.array([170, 100, 80])
    max_yellow2 = np.array([180, 255, 255])
    mask2 = cv2.inRange(dilation_blur, min_yellow2, max_yellow2)
    cv2.imwrite(ruta + '/mascara1.jpg', mask1)
    cv2.imwrite(ruta + '/mascara2.jpg', mask2)

    #Combine the color masks.
    mask = mask1 + mask2 + black_mask
    cv2.imwrite(ruta + '/mask.jpg', mask)
    # opening = cv2.morphologyEx(dilation, cv2.MORPH_OPEN, kernel)
    # cv2.imwrite('resultados/opening.jpg', opening)

    # Clean the image and create the ellipse.

    #Erode the image to shrink colorless gaps, then dilate,
    #within the region we want to enclose.
    mask_closed = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    mask_closed = cv2.dilate(mask_closed, kernel, iterations=3)
    # mask_closed = cv2.dilate(mask_closed, kernel, iterations = 1)
    # mask_closed = cv2.morphologyEx(mask_closed, cv2.MORPH_CLOSE, kernel)
    cv2.imwrite(ruta + '/closed.jpg', mask_closed)
    #Dilate to reduce noise outside what we identified, then erode.
    mask_clean = cv2.morphologyEx(mask_closed, cv2.MORPH_OPEN, kernel)
    cv2.imwrite(ruta + '/open.jpg', mask_clean)

    # Find the best match and get its contour
    big_banana_contour, mask_bananas = find_biggest_contour(mask_clean)

    # Overlay the cleaned mask on the image to highlight it.
    overlay = overlay_mask(mask_clean, image)
    cv2.imwrite(ruta + '/overlay.jpg', overlay)

    # Circle the best-matching region.
    circled, cropped = circle_contour(image, big_banana_contour, ruta)

    # Convert back to the original color scheme.
    cropped = cv2.cvtColor(cropped, cv2.COLOR_BGR2RGB)

    return circled, cropped
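
`find_biggest_contour` and `overlay_mask` are referenced above but not included in the snippet. A minimal sketch of the former, assuming it should return the largest external contour plus a mask containing only that contour (OpenCV 4-style `findContours`):

import cv2
import numpy as np

def find_biggest_contour(image):
    # copy so older OpenCV builds do not modify the input in place
    contours, _ = cv2.findContours(image.copy(), cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return None, np.zeros_like(image)
    biggest = max(contours, key=cv2.contourArea)
    mask = np.zeros_like(image)
    cv2.drawContours(mask, [biggest], -1, 255, -1)  # fill the biggest contour
    return biggest, mask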
Exemplo n.º 43
0
def detectWord(img, toDetect):
    processedImg = arabic_processing(img)
    preparedImg = prepareImg(processedImg, 600)

    if not os.path.exists('cropped'):
        os.makedirs('cropped')

    img1 = preparedImg

    kernel = createKernel(kernelSize=15, sigma=11, theta=6)
    imgFiltered = cv2.filter2D(preparedImg,
                               -1,
                               kernel,
                               borderType=cv2.BORDER_REPLICATE).astype(
                                   np.uint8)
    # circle_kernel is not defined in this snippet; an elliptical structuring
    # element is a reasonable assumption:
    circle_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    imgFiltered = cv2.dilate(imgFiltered, circle_kernel, iterations=5)
    (_, imgThres) = cv2.threshold(imgFiltered, 0, 255,
                                  cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    imgThres = 255 - imgThres

    # Find the contours
    contours, hierarchy = cv2.findContours(imgThres, cv2.RETR_LIST,
                                           cv2.CHAIN_APPROX_SIMPLE)
    minArea = 250
    res = []
    for c in contours:
        # skip small word candidates
        if cv2.contourArea(c) < minArea:
            continue
        # append bounding box and image of word to result list
        currBox = cv2.boundingRect(c)  # returns (x, y, w, h)
        (x, y, w, h) = currBox
        currImg = preparedImg[y:y + h, x:x + w]
        res.append((currBox, currImg))

    res = sorted(res, key=lambda entry: entry[0][0])
    i = 0
    w, h = 5, len(res)
    Matrix = [[0 for x in range(w)] for y in range(h)]

    for (j, w) in enumerate(res):
        (wordBox, wordImg) = w
        (x, y, w, h) = wordBox
        yr = range(y - 10, y + h + 10)
        xr = range(x - 5, x + w + 2)
        for k in range(img1.shape[0]):
            for g in range(img1.shape[1]):
                if not (g in xr and k in yr):
                    img1[k, g] = 255
        Matrix[i][0] = y - 10
        Matrix[i][1] = y + h + 10
        Matrix[i][2] = x - 5
        Matrix[i][3] = x + w + 2
        # must match the filename written below; the original used a
        # mismatched 'cropped\pic000%d.png' pattern, so the comparison
        # against the glob results could never succeed
        Matrix[i][4] = 'cropped/pic{:>05}.png'.format(i)
        cv2.imwrite('cropped/pic{:>05}.png'.format(i), img1)
        imgx = arabic_processing(img)
        img1 = prepareImg(imgx, 600)
        i += 1

    k = 0
    y = toDetect

    croppedImages = glob.glob("cropped/*.png")
    for image in croppedImages:
        with open(image, 'rb') as file:
            text = pytesseract.image_to_string(Image.open(file),
                                               lang='ara',
                                               config=" --psm 10 ")
            text = removeSpecial(text)
            y = removeSpecial(y)
            #text=removeHamza(text)
            #y=removeHamza(y)
            if text == y:
                for t in range(len(res)):
                    if (str(image) == str(Matrix[t][4])):
                        cv2.rectangle(img1, (Matrix[t][2], Matrix[t][0]),
                                      (Matrix[t][3], Matrix[t][1]), 0, 1)

            k += 1

    return img1
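
`createKernel(kernelSize, sigma, theta)` is not defined in this snippet. Word-segmentation pipelines of this kind typically smear characters into word blobs with an anisotropic filter kernel; a sketch of one common construction (the exact formula here is an assumption):

import math
import numpy as np

def createKernel(kernelSize, sigma, theta):
    # anisotropic filter kernel: wider than tall, so characters on a line
    # merge into a single word blob after filtering
    assert kernelSize % 2  # kernel size must be odd
    halfSize = kernelSize // 2
    kernel = np.zeros([kernelSize, kernelSize])
    sigmaX = sigma
    sigmaY = sigma * theta
    for i in range(kernelSize):
        for j in range(kernelSize):
            x = i - halfSize
            y = j - halfSize
            expTerm = np.exp(-x**2 / (2 * sigmaX) - y**2 / (2 * sigmaY))
            xTerm = (x**2 - sigmaX**2) / (2 * math.pi * sigmaX**5 * sigmaY)
            yTerm = (y**2 - sigmaY**2) / (2 * math.pi * sigmaY**5 * sigmaX)
            kernel[i, j] = (xTerm + yTerm) * expTerm
    return kernel / np.sum(kernel)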
Exemplo n.º 44
0
def center_handle(x, y, w, h):  # header reconstructed; the snippet is cut off here
    x1 = int(w / 2)
    y1 = int(h / 2)
    cx = x + x1
    cy = y + y1
    return cx, cy


detect = []
offset = 6  # allowable error between pixels
counter = 0

while True:
    ret, frame1 = cap.read()
    grey = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(grey, (3, 3), 5)

    # applying on each frame
    img_sub = algorithm.apply(blur)
    dilat = cv2.dilate(img_sub, np.ones((5, 5)))
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

    dilatada = cv2.morphologyEx(dilat, cv2.MORPH_CLOSE, kernel)
    dilatada = cv2.morphologyEx(dilatada, cv2.MORPH_CLOSE, kernel)
    dilatada = cv2.morphologyEx(dilatada, cv2.MORPH_CLOSE, kernel)
    counterShape, h = cv2.findContours(dilatada, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)

    cv2.line(frame1, (25, count_line_position), (1200, count_line_position),
             (255, 127, 0), 3)

    for (i, c) in enumerate(counterShape):
        (x, y, w, h) = cv2.boundingRect(c)
        validate_counter = (w >= min_width_rect) and (h >= min_height_rect)
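
The example is cut off before the actual counting step. A hedged sketch of how such loops usually finish, reusing the `detect`, `offset`, `counter` and `count_line_position` names above:

        if not validate_counter:
            continue
        cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 2)
        detect.append(center_handle(x, y, w, h))  # centroid of the box

    for (cx, cy) in list(detect):
        # count a vehicle once its centroid crosses the line, within `offset`
        if count_line_position - offset < cy < count_line_position + offset:
            counter += 1
            detect.remove((cx, cy))
    cv2.imshow('Video', frame1)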
Exemplo n.º 45
0
    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if args.get("video") and not grabbed:
        break
    # capture frames from the camera
    # for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # resize the frame, blur it, and convert it to the HSV color space
    # frame = imutils.resize(frame.array, width=600)  # for picamera
    frame = imutils.resize(frame, width=600)  # for video

    # blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Threshold the HSV image to get only cyan colors
    maskTable = cv2.inRange(hsv, lower_cyan, upper_cyan)
    maskTable = cv2.dilate(maskTable, None, iterations=10)
    maskTable = cv2.erode(maskTable, None, iterations=10)

    # Threshold the HSV image for white color
    maskWhite = cv2.inRange(hsv, lower_white, upper_white)
    #  maskWhite = cv2.dilate(maskWhite, None, iterations=2)
    #  maskWhite = cv2.erode(maskWhite, None, iterations=2)
    #maskWhite = cv2.morphologyEx(maskWhite, cv2.MORPH_OPEN, kernel)
    maskWhite = maskWhite & maskTable

    # Threshold the HSV image for red color
    maskRed1 = cv2.inRange(hsv, lower_red1, upper_red1)
    maskRed2 = cv2.inRange(hsv, lower_red2, upper_red2)
    maskRed = maskRed1 | maskRed2
    maskRed = cv2.dilate(maskRed, None, iterations=1)
    maskRed = cv2.erode(maskRed, None, iterations=2)
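
For reference, `lower_red1` through `upper_red2` are defined outside this snippet; red needs two ranges because it straddles the hue wrap-around on OpenCV's 0-179 scale. A sketch with illustrative values only:

lower_red1, upper_red1 = np.array([0, 120, 70]), np.array([10, 255, 255])
lower_red2, upper_red2 = np.array([170, 120, 70]), np.array([180, 255, 255])
# OR the two halves of the red hue range together
maskRed = cv2.inRange(hsv, lower_red1, upper_red1) | cv2.inRange(hsv, lower_red2, upper_red2)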
Exemplo n.º 46
0
    blur = cv2.blur(frame, (3, 3))

    # Convert to HSV color space
    hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)

    # Create a binary image where white marks skin colors and the rest is black
    mask2 = cv2.inRange(hsv, np.array([2, 50, 50]), np.array([15, 255, 255]))
    
    # Kernel matrices for morphological transformation
    kernel_square = np.ones((11, 11), np.uint8)
    kernel_ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

    # Perform morphological transformations to filter out the background noise:
    # dilation grows the skin-colored area, erosion then trims small noise
    dilation = cv2.dilate(mask2, kernel_ellipse, iterations=1)
    erosion = cv2.erode(dilation, kernel_square, iterations=1)
    dilation2 = cv2.dilate(erosion, kernel_ellipse, iterations=1)
    filtered = cv2.medianBlur(dilation2, 5)
    kernel_ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8, 8))
    dilation2 = cv2.dilate(filtered, kernel_ellipse, iterations=1)
    kernel_ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    dilation3 = cv2.dilate(filtered, kernel_ellipse, iterations=1)  # note: unused below
    median = cv2.medianBlur(dilation2, 5)
    ret, thresh = cv2.threshold(median, 127, 255, 0)
    
    #Find contours of the filtered frame
    #contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    _,contours,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    #Draw Contours
    #cv2.drawContours(frame, cnt, -1, (122,122,0), 3)
Exemplo n.º 47
0
 def make_mask(self):
     mask_temp = cv2.inRange(self.image, self.lower_red, self.upper_red)
     mask_eroded = cv2.erode(mask_temp, self.kernel_erode)
     self.mask = cv2.dilate(mask_eroded, self.kernel_dilate, iterations=1)
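
The method relies on instance attributes set up elsewhere. A minimal sketch of a host class, with all names and color ranges assumed:

class RedDetector:  # hypothetical host class for make_mask above
    def __init__(self, bgr_image):
        self.image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)
        self.lower_red = np.array([0, 120, 70], np.uint8)
        self.upper_red = np.array([10, 255, 255], np.uint8)
        self.kernel_erode = np.ones((3, 3), np.uint8)
        self.kernel_dilate = np.ones((5, 5), np.uint8)
        self.mask = None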
Exemplo n.º 48
0
def dilation(img, kernel_size=5):
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    return cv2.dilate(img, kernel, iterations=1)
Exemplo n.º 49
0
    # resize the frame, convert it to grayscale, and blur it
    # frame = imutils.resize(frame, width=500, height=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # print firstFrame.shape,gray.shape
    # if the first frame is None, initialize it
    if firstFrame is None:
        firstFrame = gray
        continue

    # compute the absolute difference between the current frame and
    # first frame
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)

    # loop over the contours
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < args["min_area"]:
            continue

        # compute the bounding box for the contour, draw it on the frame,
        # and update the text

        (x, y, w, h) = cv2.boundingRect(c)

        crop_image = frame[y:y + h, x:x + w]
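
The snippet stops right after cropping the moving region; in the usual frame-differencing loop this pattern continues roughly as follows (window name and quit key are assumptions):

        # draw the bounding box on the frame
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv2.imshow("Motion Feed", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break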
Exemplo n.º 50
0
    y = cv2.getTrackbarPos('Y','image')
    u = cv2.getTrackbarPos('U','image')
    v = cv2.getTrackbarPos('V','image')

    img[:] = [y,u,v]
    img= cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
    #cv2.imshow('image',img)
    boln,f = cap.read()
    img_yuv = cv2.cvtColor(f, cv2.COLOR_BGR2YUV)
    #cv2.imshow("g1",img_yuv)
    #img_yuv[:,:,2] = cv2.equalizeHist(img_yuv[:,:,2])
    #cv2.imshow("g2",img_yuv
    mask = cv2.inRange(img_yuv, (np.array([0,u-45,v-45])), (np.array([255,u+45,v+45])))
    cv2.imshow("Masking",mask)
    erode = cv2.erode(mask,None,iterations = 1)
    dilate = cv2.dilate(erode,None,iterations = 1)
    image,contours,hierarchy = cv2.findContours(dilate,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
    #if len(contours)>0:
        #c = max(contours, key=cv2.contourArea)

        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(f,(x,y),(x+w,y+h),[255,0,0],2)
        #print x, y, w, h

    #img_output = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
    #cv2.imshow('Histogram equalized', f)
    if cv2.waitKey(25) & 0xff == 27:
        break

cap.release()
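
The loop above polls three trackbars and a capture device that are created elsewhere; a sketch of the assumed setup:

import cv2
import numpy as np

def nothing(_):
    pass  # trackbar callback; values are polled inside the loop instead

cap = cv2.VideoCapture(0)
img = np.zeros((100, 300, 3), np.uint8)
cv2.namedWindow('image')
cv2.createTrackbar('Y', 'image', 0, 255, nothing)
cv2.createTrackbar('U', 'image', 0, 255, nothing)
cv2.createTrackbar('V', 'image', 0, 255, nothing)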
Exemplo n.º 51
0
def back_end(image):
    
    inimg1=image
    inimg = cv2.blur(inimg1, (5,5))
    hsv = cv2.cvtColor(inimg, cv2.COLOR_BGR2HSV)

    lowerred = np.array([0, 70, 70], dtype=np.uint8)          #hsv range for red
    upperred = np.array([20, 255, 255], dtype=np.uint8)
    lowerblue = np.array([98, 50, 50], dtype=np.uint8)        #hsv range for blue
    upperblue = np.array([170, 255, 255], dtype=np.uint8)
    lowergreen = np.array([75, 50, 50], dtype=np.uint8)       #hsv range for green
    uppergreen = np.array([90, 255, 170], dtype=np.uint8)



    p=0               #color mapping: 0 for green, 1 for red, 2 for blue
    d=[]
    
    shape= "no_CM"     #initial values
    contclr= "no_CM"


    while(p<3):

        if(p==0):

            color="green"
            lower=lowergreen
            upper=uppergreen
        elif (p==1):
            color="red"
            lower=lowerred
            upper=upperred
        elif (p==2):
            color="blue"
            lower=lowerblue
            upper=upperblue
        
        threshin = cv2.inRange(hsv, lower, upper)
        #noise removal
        median = cv2.medianBlur(threshin,5)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(9,9))
        dilated = cv2.dilate(median, kernel)
        blur3 = cv2.bilateralFilter(dilated,9,75,75)
        median3 = cv2.medianBlur(blur3,5)
        incont, contours, hierarchy = cv2.findContours(median3,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        if (len(contours)>=1):

            print len(contours)
            cv2.drawContours(inimg1, contours, -1, (0,255,0), 3)
            perimeter = cv2.arcLength(contours[0],True)
            print "p=", perimeter
            if (perimeter>=190):            #classifying shapes based on perimeter
                  shape= "square"
                  #print "square"

            elif(perimeter>=160 and perimeter<190):
                  #print "circle"                   
                  shape="circle"
            elif(perimeter<160 and perimeter >120):
                  #print "triangle"
                  shape="triangle"
            contclr = color
            break
        p=p+1

    
    return shape,contclr,len(contours)
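
Classifying shapes by raw perimeter, as above, is resolution-dependent. A scale-invariant alternative (not what this example uses) classifies by the vertex count of an approximated polygon:

def classify_shape(contour):
    peri = cv2.arcLength(contour, True)
    # approximate the contour with a coarser polygon
    approx = cv2.approxPolyDP(contour, 0.04 * peri, True)
    if len(approx) == 3:
        return "triangle"
    if len(approx) == 4:
        return "square"
    return "circle"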
Exemplo n.º 52
0
 def dilate(self, img, size=(5, 5), iterations=1):
     kernel = np.ones(tuple(size), np.uint8)
     # pass iterations by keyword: the third positional argument of
     # cv2.dilate is the destination image, not the iteration count
     return cv2.dilate(img, kernel, iterations=iterations)
Exemplo n.º 53
0
def extract(filename):
    # filename = request.form['image']
    #  target = os.path.join(APP_ROOT, 'blob/')
    # destination = "/".join([target, filename])
    # image_path = os.path.join(app.config['static/'], filename)
    #req = urllib.request.Request("{url_name}")
    # image = StringIO.StringIO(urllib.request.urlopen(req).read())
    image = test_image(filename)
    import cv2
    import numpy as np
    import pandas as pd
    import matplotlib
    matplotlib.use('Agg')  # select the backend before pyplot is imported
    import matplotlib.pyplot as plt
    import csv

    import pytesseract

    # read your file
    # file = upload()
    # file = send_image(file)
    # image = cv2.imread(destination, 0)
    # image.shape
    # thresholding the image to a binary image
    # thresh,img_binary = cv2.threshold(image,120,255,cv2.THRESH_BINARY |cv2.THRESH_OTSU)#inverting the image
    img_binary = cv2.adaptiveThreshold(image, 255,
                                       cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY, 11, 2)
    img_binary = 255 - img_binary  # inverting  the image
    # cv2.imwrite('cv_inverted.png', img_binary)  # Plotting the image to see the output
    plotting = plt.imshow(img_binary, cmap='gray')
    plt.show()

    # In[3]:

    # np.array(image).shape[0]

    # In[4]:

    #  np.array(image).shape[1]

    # In[5]:

    Image_size = 2000
    # length_x,width_y = image.size
    length_x = np.array(image).shape[1]
    width_y = np.array(image).shape[0]
    factor = max(1, int(Image_size // length_x))
    size = factor * length_x, factor * width_y

    # In[6]:

    # In[42]:

    # Kernel length (the original comment said "100th of total width",
    # but a fixed value is used here)
    kernel_length = 5
    # kernel_length = factor
    # Defining a vertical kernel to detect all vertical lines of image
    vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT,
                                                (1, kernel_length))
    # Defining a horizontal kernel to detect all horizontal lines of image
    horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT,
                                                  (kernel_length, 1))
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))

    # In[43]:

    image_1 = cv2.erode(img_binary, vertical_kernel, iterations=3)
    vertical_lines = cv2.dilate(image_1, vertical_kernel, iterations=3)
    # cv2.imwrite("testing-vertical.jpg", vertical_lines)  # Plotting the generated image
    plotting = plt.imshow(image_1, cmap='gray')
    plt.show()

    # In[44]:

    # Use horizontal kernel to detect and save the horizontal lines in a jpg
    image_2 = cv2.erode(img_binary, horizontal_kernel, iterations=3)
    horizontal_lines = cv2.dilate(image_2, horizontal_kernel, iterations=3)
    #  cv2.imwrite("horizontal.jpg", horizontal_lines)  # Plotting the generated image
    plotting = plt.imshow(image_2, cmap='gray')
    plt.show()

    # In[45]:

    # addWeighted weighs the vertical and horizontal lines the same;
    # bitwise_xor and bitwise_not provide the exclusive-or and not operations

    # In[47]:

    # Combine horizontal and vertical lines in a new third image, with both having same weight.
    img_vh = cv2.addWeighted(vertical_lines, 0.5, horizontal_lines, 0.5,
                             0.0)  # Eroding and thesholding the image
    img_vh = cv2.erode(~img_vh, kernel, iterations=2)
    thresh, img_vh = cv2.threshold(img_vh, 120, 255,
                                   cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # threshold binary or otsus binarization(mainly for bimodal)
    # thresh is thresholded value used,next is the thresholded image
    # cv2.imwrite("img_combined.jpg", img_vh)
    bitxor = cv2.bitwise_xor(image, img_vh)
    bitnot = cv2.bitwise_not(bitxor)
    # cv2.imwrite("bitnot.jpg", bitnot)
    # Plotting the generated image
    plotting = plt.imshow(bitnot, cmap='gray')
    plt.show()

    # The mode cv2.RETR_TREE finds all the promising contour lines and
    # reconstructs a full hierarchy of nested contours. The method
    # cv2.CHAIN_APPROX_SIMPLE returns only the endpoints that are necessary
    # for drawing the contour line.

    # In[12]:

    contours, hierarchy = cv2.findContours(img_vh, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)

    # In[13]:

    import numpy as np
    import argparse
    import imutils
    import cv2

    # In[14]:

    # get_ipython().system('pip install imutils')

    # In[15]:

    def sort_contours(cnts,
                      method="left-to-right"
                      ):  # initialize the reverse flag and sort index
        reverse = False
        i = 0  # handle if we need to sort in reverse
        if method == "right-to-left" or method == "bottom-to-top":
            reverse = True  # handle if we are sorting against the y-coordinate rather than
        # the x-coordinate of the bounding box
        if method == "top-to-bottom" or method == "bottom-to-top":
            i = 1  # construct the list of bounding boxes and sort them from top to
        # bottom
        boundingBoxes = [cv2.boundingRect(c) for c in cnts]
        (cnts, boundingBoxes) = zip(
            *sorted(zip(cnts, boundingBoxes),
                    key=lambda b: b[1][i],
                    reverse=reverse
                    ))  # return the list of sorted contours and bounding boxes
        return (cnts, boundingBoxes)

    # ###following a top-down approach for sorting contours

    # In[16]:

    # Sort all the contours by top to bottom.
    contours, boundingBoxes = sort_contours(contours, "top-to-bottom")

    # In[17]:

    # Creating a list of heights for all detected boxes
    heights = [boundingBoxes[i][3]
               for i in range(len(boundingBoxes))]  # Get mean of heights
    mean = np.mean(heights)

    #

    # In[51]:

    box = [
    ]  # Get position (x,y), width and height for every contour and show the contour on image
    for c in contours:
        x, y, w, h = cv2.boundingRect(c)
        if (h < 1000 and w < 1000):
            fimage = cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0),
                                   2)
            box.append([x, y, w, h])
    plotting = plt.imshow(fimage, cmap='gray')
    plt.show()

    # In[19]:

    # len(box)

    # In[20]:

    print(box)

    # In[21]:

    # len(box)

    # In[22]:

    # classifying into rows and columns

    # In[23]:

    row = []
    column = []
    j = 0

    for i in range(len(box)):

        if (i == 0):
            column.append(box[i])
            previous = box[i]

        else:
            if (box[i][1] <= previous[1] + mean / 2):
                column.append(box[i])
                previous = box[i]

                if (i == len(box) - 1):
                    row.append(column)

            else:
                row.append(column)
                column = []
                previous = box[i]
                column.append(box[i])

    print(column)
    print(row)

    # In[24]:

    # the column count is the maximum number of cells found in any row
    # (the original compared countcol against itself, so it always kept
    # the count of the last row)
    countcol = 0
    for i in range(len(row)):
        if len(row[i]) > countcol:
            countcol = len(row[i])
    print(countcol)

    # In[ ]:

    # In[25]:

    # row[0]

    # In[ ]:

    # In[26]:

    count = 0
    for i in range(len(row)):
        count += 1
        print(row[i])
    print(count)

    # In[27]:

    # Retrieve the column centers from the first row and sort them
    # (the original comprehension reused a stale loop index i; row[0] is meant)
    center = [int(row[0][j][0] + row[0][j][2] / 2) for j in range(len(row[0]))]
    center = np.array(center)
    center.sort()

    # In[28]:

    # print(center)

    # In[29]:

    # Based on the distance to the column centers, the boxes are assigned to their respective columns
    finalboxes = []
    for i in range(len(row)):
        lis = []
        for k in range(countcol):
            lis.append([])
        for j in range(len(row[i])):
            diff = abs(center - (row[i][j][0] + row[i][j][2] / 4))
            minimum = min(diff)
            indexing = list(diff).index(minimum)
            lis[indexing].append(row[i][j])
        finalboxes.append(lis)

    # In[30]:

    # finalboxes

    #
    # psm: set Tesseract to run only a subset of layout analysis and assume a certain form of image:
    #
    #
    # 0 = Orientation and script detection (OSD) only.
    # 1 = Automatic page segmentation with OSD.
    # 2 = Automatic page segmentation, but no OSD, or OCR. (not implemented)
    # 3 = Fully automatic page segmentation, but no OSD. (Default)
    # 4 = Assume a single column of text of variable sizes.
    # 5 = Assume a single uniform block of vertically aligned text.
    # 6 = Assume a single uniform block of text.
    # 7 = Treat the image as a single text line.
    # 8 = Treat the image as a single word.
    # 9 = Treat the image as a single word in a circle.
    # 10 = Treat the image as a single character.
    # 11 = Sparse text. Find as much text as possible in no particular order.
    # 12 = Sparse text with OSD.
    # 13 = Raw line. Treat the image as a single text line,
    #      bypassing hacks that are Tesseract-specific.

    # oem: specify the OCR Engine mode. The options for N are:
    #
    #     0 = Original Tesseract only.
    #     1 = Neural nets LSTM only.
    #     2 = Tesseract + LSTM.
    #     3 = Default, based on what is available.
    #
    #

    # In[ ]:

    # custom_config = r'--oem 3 --psm 6 outputbase digits'

    # In[57]:

    # from every single image-based cell/box the strings are extracted via pytesseract and stored in a list
    outer = []

    for i in range(len(finalboxes)):
        for j in range(len(finalboxes[i])):
            inner = ''
            if (len(finalboxes[i][j]) == 0):
                outer.append(' ')
            else:
                for k in range(len(finalboxes[i][j])):
                    y, x, w, h = finalboxes[i][j][k][0], finalboxes[i][j][k][1], finalboxes[i][j][k][2], \
                                 finalboxes[i][j][k][3]
                    finalling = bitnot[x:x + h, y:y + w]
                    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 1))
                    border = cv2.copyMakeBorder(finalling,
                                                2,
                                                2,
                                                2,
                                                2,
                                                cv2.BORDER_CONSTANT,
                                                value=[0, 255])
                    resizing = cv2.resize(border,
                                          None,
                                          fx=2,
                                          fy=2,
                                          interpolation=cv2.INTER_CUBIC)
                    dilation = cv2.dilate(
                        resizing, kernel,
                        iterations=1)  ##size of foreground object increases
                    erosion = cv2.erode(
                        dilation, kernel, iterations=2
                    )  # Thickness of foreground object decreases
                    out = pytesseract.image_to_string(finalling)
                    if (len(out) == 0):
                        out = pytesseract.image_to_string(
                            erosion,
                            config='--psm 3 --oem 3 -c '
                            'tessedit_char_whitelist=0123456789')
                    inner = inner + " " + out
                outer.append(inner)
    arr = np.array(outer)
    dataframe = pd.DataFrame(arr.reshape(len(row), countcol))
    print(dataframe)
    data = dataframe.style.set_properties(align="left")
    return Response(
        dataframe.to_csv(),
        mimetype="text/csv",
        headers={"Content-disposition": "attachment; filename=filename.csv"})
Exemplo n.º 54
0
def main():
        frame_count = 0       # to skip a few frames
        zone_count = 0        # to know which zone to overlay the seedling images on
        speed = 50            # speed of the bot
        skip = 12             # how many frames to skip
        count = 0             # to know the start of the inverted plane
        count2 = 0            # to know the end of the inverted plane
        for frameo in camera.capture_continuous(rawCapture, format='bgr', use_video_port=True, splitter_port=2, resize=(160, 128)):
                frame = frameo.array    # fetch the frame

                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                blur = apply_smoothing(gray)

                ret, th1 = cv2.threshold(blur, 5, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)  # threshold to get a binary image
                ret1, th2 = cv2.threshold(th1, 127, 255, cv2.THRESH_BINARY_INV)  # invert the pixels of the image
                frame_count = frame_count + 1
                
                if(frame_count<10):                                #skiping few frames as soon as the bot has been turned on to allow the camera to heat up and stabilize
                      rawCapture.truncate(0)
                      continue
                if(zone_count==5):                                 #sending the noninverted thresholded frame for futher processing if inverted plane is detected
                      if(count2==1):
                            make=th1
                            count=1
                            skip=1000                              
                            speed=12                               #decrease the speed in the inverted plane 
                            print "inverted plane"
                            smooth1=cv2.erode(make,None,iterations=7)
                            dil=cv2.dilate(smooth1,None,iterations=11)
                            forward(speed)  
                            selected_region=select_regionIP1(dil,frame)
                            selected_region2=select_regionIP2(dil,frame)
                            selected_region3=select_regionIP3(dil,frame)
                            # the original passed the undefined names
                            # selected_regionIP1/2/3 to get_contour here
                            cont= get_contour(selected_region)
                            cont2=get_contour(selected_region2)
                            cont3=get_contour(selected_region3)
                      else:
                            make=th2                             
                      
                else:
                     make=th2             #if it is black line detection send inverter thresholded frame for further processing

                     smooth1=cv2.erode(make,None,iterations=7)           #erosion and dilation operation to remove the grid lines as they would interfere in proper line detection
                     dil=cv2.dilate(smooth1,None,iterations=11)
                     
                     forward(speed)                                     #no matter what move the bot forward at lower pwm


                     #the lower part of the input frame is divided into 3 regions 
                     selected_region=select_region(dil,frame)
                     selected_region2=select_region2(dil,frame)
                     selected_region3=select_region3(dil,frame)
                     #contours are detected in each region
                     cont= get_contour(selected_region)
                     cont2=get_contour(selected_region2)
                     cont3=get_contour(selected_region3)

##########################################################################################################################################
#for every frame, 3 contours are detected along the black line; each one's deviation from the centre of the frame is calculated and the pwms are updated accordingly
##########################################################################################################################################
                     
                if len(cont)==1:                         #contour of region 1
                      Cx,Cy= get_centroid(cont)          # centroid of the contour, to find the deviation from the centre of the frame
                      area= cv2.contourArea(cont[0])     # area of the contour, to identify whether a zone has been encountered
                      draw_output(frame,cont,Cx,Cy)
                      
                      frame_count +=1
                      if(frame_count>10):
                         
                           compute_pwm(Cx,frame,0.0003,0.0018,0)      # update the motor pwm proportionally to the deviation from the centroid
                           if len(cont2)==1:             #contour of region 2
                                Cx2,Cy2=get_centroid(cont2)
                                draw_output(frame,cont2,Cx2,Cy2)
                                compute_pwm(Cx2,frame,0.0002,0.0015,0)
                                area2= cv2.contourArea(cont2[0])
                            
                                if len(cont3)==1:       #contour of region 3
                                     Cx3,Cy3=get_centroid(cont3)
                                     draw_output(frame,cont3,Cx3,Cy3)
                                     area3= cv2.contourArea(cont3[0])
                                     
                                     # identify whether a zone has been encountered from the area of the detected contour
                                     if(area>1100 and area2>1100 and area3>1000 and frame_count>skip and zone_count<5):
                                                                               
                                         zone_count =zone_count+1
                                        
                                         left.start(0)                     #stopping at the zone
                                         right.start(0)
                                        
                                         shape,color,number=detect_shape()  # detecting shapes
                                         print shape,color,number          
                                         Region= "region"+str(zone_count)

                                         #overlaying on the plantation image 
                                         if(zone_count==1 and number>0):
                                       
                                             img=img0                  #plantation image
                                             skip=50                  
                                             speed=20
                                             numz1=number    #storing number of CM to blink the Led at the end
                                             colz1=color     #storing color of CM to blink that particular color
                                             image2=make_overlay(img,shape,color,number,Region)   #overlay image
                                             cv2.imshow("overlay",image2)
                                             cv2.waitKey(1)
                                         elif(zone_count==2 and number>0):
                                             img=image2
                                             skip=400
                                             speed=25
                                             numz2=number    #storing number of CM to blink the Led at the end
                                             colz2=color     #storing color of CM to blink that particular color

                                             image3=make_overlay(img,shape,color,number,Region)
                                             cv2.imshow("overlay",image3)
                                             cv2.waitKey(1)

                                         elif(zone_count==3 and number>0):
                                             img=image3
                                             speed=12
                                             skip=20
                                             numz3=number    #storing number of CM to blink the Led at the end
                                             colz3=color     #storing color of CM to blink that particular color

                                             image4=make_overlay(img,shape,color,number,Region)
                                             cv2.imshow("overlay",image4)
                                             cv2.waitKey(1)

                                         elif(zone_count==4 and number>0):
                                             img=image4
                                             speed=50
                                             skip=50
                                             numz4=number    #storing number of CM to blink the Led at the end
                                             colz4=color     #storing color of CM to blink that particular color

                                             image5=make_overlay(img,shape,color,number,Region)
                                             cv2.imshow("overlay",image5)
                                             cv2.waitKey(1)
                                         if(number>0 or (zone_count>4 and number==0)):
                                             forward(40)
                                             time.sleep(0.1)
                                         else:
                                             zone_count=zone_count-1            #if no color markers are detected
                                         left.start(0)
                                         right.start(0)
                                         rawCapture.truncate(0)
                                         still.truncate(0)
                                         frame_count=0
                                         continue

                                         
                                     # if the centroids of all three contours on the line are near the centre of the frame, move a bit faster
                                     if (((Cx<(rowsp/2)+5) and (Cx>(rowsp/2)-5)) and ((Cx2<(rowsp/2)+5) and (Cx2>(rowsp/2)-5)) and ((Cx3<(rowsp/2)+5) and (Cx3>(rowsp/2)-5))):
                                     	compute_pwm(Cx3,frame,0.0001,0.002,1)
                                     else:
                                     	compute_pwm(Cx3,frame,0.0001,0.002,0)


                
                elif (len(cont)>1):                      
                      if(zone_count==5 and count==0):       #if inverted plane is encountered there will be two contours detected
                          left.start(0)
                          right.start(0)
                          time.sleep(0.1)
                          count2=1
                          print "change count2"
                          forward(10)
                          time.sleep(0.8)
                          turn_softright(90)
                          time.sleep(0.1)
                          left.start(0)
                          right.start(0)
                          rawCapture.truncate(0)
                          continue
                      elif(zone_count==5 and count==1 and frame_count>skip):   # when the inverted plane ends, two contours are detected again,
                                                                               # which indicates the shed has been reached

                          left.start(0)
                          right.start(0)
                          print "SHED reached"
                          forward(20)
                          print "zone1",colz1,numz1
                          print "zone2",colz2,numz2
                          print "zone3",colz3,numz3
                          print "zone4",colz4,numz4
                          time.sleep(2)
                          left.start(0)
                          right.start(0)
                          blink_end(colz1,1)
                          blink_end(colz2,1)
                          blink_end(colz3,1)
                          blink_end(colz4,1)
                          while(1):
                                left.start(0)
                                right.start(0)      #stop at the shed permanently
                      elif(zone_count<5):
                          turn_softleft(10)
                          time.sleep((abs((rowsp/2)-50)*2)*0.0008)
                          print("more contour")
                      turn_softleft(10)
                      time.sleep((abs((rowsp/2)-50)*3)*0.0008)
   
                      print len(cont)
                else:                                   #some random case 
                      if(zone_count==5):                #might be due to reflection or shadows
                            forward(20)                 
                            time.sleep(0.1)
                            turn_softleft(20)           # moving the bot slightly left and right in such cases worked for me
                            time.sleep(0.1)
                            left.start(0)
                            right.start(0)
                            rawCapture.truncate(0)
                            continue
                      turn_softright(10)
                       
                      time.sleep((abs((rowsp/2)-50)*2)*0.0009)
                      print("in some random case")
               
                rawCapture.truncate(0)
      
                left.start(0)
                right.start(0)




                key = cv2.waitKey(1) & 0xFF
                if key == ord("q"):
                        rawCapture.truncate(0)
                        camera.stop_preview()
                        camera.close()
                        GPIO.cleanup()
                        break

        cv2.destroyAllWindows()
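
`compute_pwm` is referenced throughout but never defined here. From its call sites (a centroid x, the frame, two gains, and a boost flag) it is presumably a proportional steering controller; a sketch under those assumptions, using the `left`/`right` PWM objects above:

def compute_pwm(cx, frame, kp, base_gain, boost):
    # hypothetical proportional line follower: steer by the centroid's
    # horizontal deviation from the frame centre
    centre = frame.shape[1] / 2.0
    error = cx - centre
    base = base_gain * 10000 + (10 if boost else 0)  # nominal duty cycle (assumed scaling)
    turn = kp * 10000 * error                        # differential steering term
    left.ChangeDutyCycle(max(0, min(100, base - turn)))
    right.ChangeDutyCycle(max(0, min(100, base + turn)))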
Exemplo n.º 55
0
import cv2
import numpy as np

class Segmenter(object):
    def __init__(self):
        self._mask_32S = None
        self._waterImg = None
    # Convert the marker mask to CV_32S
    def setMark(self, mask):
        self._mask_32S = np.int32(mask)

    # Run the watershed operation
    def waterProcess(self, img):
        self._waterImg = cv2.watershed(img, self._mask_32S)

    # Get the segmentation result as an 8-bit image
    def getSegmentationImg(self):
        segmentationImg = np.uint8(self._waterImg)
        return segmentationImg

    # Handle the boundary values (-1) of the segmented image
    def getWaterSegmentationImg(self):
        waterSegmentationImg = np.copy(self._waterImg)
        waterSegmentationImg[self._waterImg == -1] = 1
        waterSegmentationImg = np.uint8(waterSegmentationImg)
        return waterSegmentationImg

    # Merge the watershed result with the source image to cut out the subject
    def mergeSegmentationImg(self, img, waterSegmentationImg, isWhite=False):
        # img was an undefined global in the original; it is now a parameter,
        # and the merged image is returned
        _, segmentMask = cv2.threshold(waterSegmentationImg, 250, 1,
                                       cv2.THRESH_BINARY)
        segmentMask = cv2.cvtColor(segmentMask, cv2.COLOR_GRAY2BGR)
        mergeImg = cv2.multiply(img, segmentMask)
        return mergeImg
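
A typical way to drive this class (a sketch; the filename and marker construction are assumptions) is to build a marker image with sure foreground at 255, sure background at 128 and the unknown band at 0, then run the watershed:

img = cv2.imread('input.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

fg = cv2.erode(binary, None, iterations=4)                # sure foreground -> 255
bg = cv2.dilate(binary, None, iterations=4)
bg = cv2.threshold(bg, 1, 128, cv2.THRESH_BINARY_INV)[1]  # sure background -> 128
markers = cv2.add(fg, bg)                                 # 0 marks the unknown band

seg = Segmenter()
seg.setMark(markers)
seg.waterProcess(img)
result = seg.getSegmentationImg()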
Exemplo n.º 56
0
    def process(self, inframe, outframe):
        # note: this method assumes `import time` and the JeVois `jevois`
        # module are available at module scope

        def tellRobot(bbox, out_center_x, out_center_y, serial_format="XY"):
            if bbox is None:
                jevois.sendSerial("stop")
            else:
                box_center_x, box_center_y = bbox[0]+bbox[2]/2, bbox[1]+bbox[3]/2
                if serial_format == "XY":
                    if out_center_x < box_center_x:
                        move_x = box_center_x - out_center_x
                    elif box_center_x < out_center_x:
                        move_x = out_center_x - box_center_x
                    elif box_center_x == out_center_x:
                        move_x = 0
                    if out_center_y < box_center_y:
                        move_y = box_center_y - out_center_y
                    elif box_center_y < out_center_y:
                        move_y = out_center_y - box_center_y
                    elif box_center_y == out_center_y:
                        move_y = 0
                    if move_x < 100:
                        move_x = 100
                    if move_y < 100:
                        move_y = 100                        
                    jevois.sendSerial("smoothmove {} {}".format(int(move_x), int(move_y)))
                else:
                    jevois.sendSerial("Invalid Serial Format")

        img = inframe.getCvBGR()
        frameHeight = img.shape[0]
        frameWidth = img.shape[1]
        out_center_x, out_center_y = frameWidth/2, frameHeight/2

        # Set the frame rate
        time.sleep(0.2)

        # Set the serial output format
        serial_format = "XY" #Options: "Belts", "XY"

        # Preprocess the input
        blurred = cv2.bilateralFilter(img,9,75,75)
        # blurred = cv2.GaussianBlur(img, (21, 21), 0)
        ret, thresh = cv2.threshold(blurred, 50, 255, cv2.THRESH_BINARY)
        hsv = cv2.cvtColor(thresh, cv2.COLOR_BGR2HSV)
        mask = np.zeros((thresh.shape[0], thresh.shape[1], 3), np.uint8)

        # Setup the tracker
        tracker = cv2.TrackerKCF_create()
        bbox = None

        # Filter the desired color range
        greenLower = (29, 86, 6)
        greenUpper = (64, 255, 255)
        redLower = (0,10,70)
        redUpper = (40,255,255)
        image = cv2.inRange(hsv, redLower, redUpper)
        image = cv2.erode(image, None, iterations=2)
        image = cv2.dilate(image, None, iterations=2)

        # Find the biggest contour
        contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        contour_sizes = [(cv2.contourArea(contour), contour) for contour in contours]
        if contours:
            biggest_contour = max(contour_sizes, key=lambda x: x[0])[1]
            x,y,w,h = cv2.boundingRect(biggest_contour)
            box_center_x, box_center_y = x+w/2, y+h/2
            cv2.drawContours(mask, [biggest_contour], -1, 255, -1)

            # Track the biggest contour
            if bbox is None:
                bbox = (x, y, w, h)
                ok = tracker.init(img, bbox)
                cv2.rectangle(mask,(x,y), (x+w, y+h), (0,255,0), 2)                
            else:
                ok, bbox = tracker.update(img)
                if ok:
                    p1 = (int(bbox[0]), int(bbox[1]))
                    p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
                    box_center_x, box_center_y = bbox[0]+bbox[2]/2, bbox[1]+bbox[3]/2
                    cv2.rectangle(mask,p1, p2, (0,255,0), 2)
                else:
                    bbox = None
        else:
            bbox = None
        cv2.putText(mask, "BBOX: " + str(bbox), (100,50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (50,170,50), 2)

        # Tell the robot what to do
        tellRobot(bbox, out_center_x, out_center_y)

        # Organize the visual output
        toprow = np.hstack((img, blurred))
        bottomrow = np.hstack((thresh, mask))
        outimg = np.vstack((toprow, bottomrow))
        outframe.sendCv(outimg)
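
Note that `process` creates a fresh `TrackerKCF` and resets `bbox = None` on every frame, so the `tracker.update` branch can never run. A sketch of the usual fix is to persist the tracking state on the instance (attribute names assumed):

    def __init__(self):
        self.tracker = None   # persists across calls to process()
        self.bbox = None

    # inside process(), replace the per-frame setup with:
    #   if self.bbox is None and contours:
    #       self.tracker = cv2.TrackerKCF_create()
    #       self.tracker.init(img, (x, y, w, h))
    #       self.bbox = (x, y, w, h)
    #   else:
    #       ok, self.bbox = self.tracker.update(img)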
Exemplo n.º 57
0
        maskname = "/home/pi/junk/mask" + str(imageCounter) + ".png"

        flipped = cv2.flip(inimg, 0)  # flip about the x axis
        cv2.imwrite(savefilename, flipped)

        #trim the image.

        #cropped=flipped[imageHeight/2:imageHeight-1,0:imageWidth-1]
        cropped = flipped

        #try it without blurring first
        #blurred=cv2.medianBlur(inimg,7)
        imghls = cv2.cvtColor(cropped, cv2.COLOR_BGR2HLS)
        mask = cv2.inRange(imghls, lowColor, highColor)
        erosion = cv2.erode(mask, erosionKernel, iterations=1)
        mask = cv2.dilate(erosion, erosionKernel, iterations=1)
        cv2.imwrite(maskname, mask)

        # Find contours
        contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_NONE)
        ballfound = False
        ballTheta = -100

        # filter the contours:
        #   width/height ratio below 2
        #   height/width ratio below 2
        #   at least 1000 pixels of area
        #   a high number of boundary points
        biggestContourArea = 0
        biggestContour = None
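
The snippet ends before the filter loop it describes. A sketch implementing the listed criteria, with the area threshold taken from the comments and the rest assumed:

        for contour in contours:
            x, y, w, h = cv2.boundingRect(contour)
            area = cv2.contourArea(contour)
            if area < 1000:                             # at least 1000 pixels
                continue
            if w / float(h) >= 2 or h / float(w) >= 2:  # keep near-square blobs
                continue
            if len(contour) < 20:                       # require many boundary points
                continue
            if area > biggestContourArea:
                biggestContourArea = area
                biggestContour = contour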
Exemplo n.º 58
0
def dilate_cv2(image, iterations=1):
    kernel = np.ones((5, 5), np.uint8)
    return cv2.dilate(image, kernel, iterations=iterations)
Exemplo n.º 59
0
"""
##cutoff of pic ,get RGBvalues
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gradX = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
gradY = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=-1)
# subtract the y-gradient from the x-gradient
gradient = cv2.subtract(gradX, gradY)
gradient = cv2.convertScaleAbs(gradient)
# blur and threshold the image
blurred = cv2.blur(gradient, (9, 9))
(_, thresh) = cv2.threshold(blurred, 90, 255, cv2.THRESH_BINARY)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 25))
closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
# perform a series of erosions and dilations
closed = cv2.erode(closed, None, iterations=4)
closed = cv2.dilate(closed, None, iterations=4)
(cnts, _) = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL,
                             cv2.CHAIN_APPROX_SIMPLE)
c = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
# compute the rotated bounding box of the largest contour
rect = cv2.minAreaRect(c)
box = np.int0(cv2.boxPoints(rect))
# draw a bounding box arounded the detected barcode and display the image
cv2.drawContours(img, [box], -1, (0, 255, 0), 3)
cv2.imshow("Image", img)
cv2.imwrite("contoursImage2.jpg", img)
#cv2.waitKey(0)
Xs = [i[0] for i in box]
Ys = [i[1] for i in box]
x1 = min(Xs)
x2 = max(Xs)
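# The block above is cut off after the x bounds; the y bounds and the crop
# presumably follow the same pattern (an assumption):
y1 = min(Ys)
y2 = max(Ys)
crop = img[y1:y2, x1:x2]   # axis-aligned crop around the rotated box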
Exemplo n.º 60
0
    upper_saturation = cv2.getTrackbarPos("upper_saturation", "bars")
    upper_value = cv2.getTrackbarPos("upper_value", "bars")
    lower_value = cv2.getTrackbarPos("lower_value", "bars")
    lower_hue = cv2.getTrackbarPos("lower_hue", "bars")
    lower_saturation = cv2.getTrackbarPos("lower_saturation", "bars")

    # Kernel to be used for dilation
    kernel = numpy.ones((3, 3), numpy.uint8)

    upper_hsv = numpy.array([upper_hue, upper_saturation, upper_value])
    lower_hsv = numpy.array([lower_hue, lower_saturation, lower_value])

    mask = cv2.inRange(inspect, lower_hsv, upper_hsv)
    mask = cv2.medianBlur(mask, 3)
    mask_inv = 255 - mask
    mask = cv2.dilate(mask, kernel, iterations=5)  # keyword needed: the third positional argument of cv2.dilate is dst

    # The mixing of frames in a combination to achieve the required frame
    b = frame[:, :, 0]
    g = frame[:, :, 1]
    r = frame[:, :, 2]
    b = cv2.bitwise_and(mask_inv, b)
    g = cv2.bitwise_and(mask_inv, g)
    r = cv2.bitwise_and(mask_inv, r)
    frame_inv = cv2.merge((b, g, r))

    b = init_frame[:, :, 0]
    g = init_frame[:, :, 1]
    r = init_frame[:, :, 2]
    b = cv2.bitwise_and(b, mask)
    g = cv2.bitwise_and(g, mask)