Example #1
def deteccion(lower, upper, tam_max_area):
    global img
    #Analyze the image
    img_hsv = cv2.cvtColor(img[200:400, 0:200], cv2.COLOR_BGR2HSV)
    #Morphological transformation for more robust detection
    kernel_cinco = np.ones((5,5),np.uint8)
    mascara = cv2.inRange(img_hsv, lower, upper)
    mascara = cv2.erode(mascara,kernel_cinco,iterations = 2)
    mascara = cv2.dilate(mascara,kernel_cinco,iterations = 2)
    mascara = cv2.morphologyEx(mascara, cv2.MORPH_OPEN, kernel_cinco)
    mascara = cv2.morphologyEx(mascara, cv2.MORPH_CLOSE, kernel_cinco)

    output = cv2.bitwise_and(img[200:400, 0:200], img[200:400, 0:200], mask = mascara)
    ret, thresh = cv2.threshold(mascara,127,255,0)
    contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    #Find the largest contour; if it exceeds the configured size it is treated as a hot spot
    if len(contours) > 0:
        for index in range(len(contours)):
            area = cv2.contourArea(contours[index])
            if area >= tam_max_area:
                if mostrar_pantalla:
                    imag = img
                    cv2.rectangle(imag,(0,200),(200,400),(0,255,0),3)
                    cv2.drawContours(imag[200:400, 0:200], contours, index, (255,0,0), -1)
                    cv2.imshow('Img-pro', imag)
                    cv2.imshow('Procesada', output)
                return True
    return False
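A minimal usage sketch for the function above (hedged: the snippet assumes module-level `img` and `mostrar_pantalla` globals, and the path, HSV bounds, and area threshold below are illustrative, not from the original project):

import cv2
import numpy as np

mostrar_pantalla = True
img = cv2.imread('frame.jpg')        # placeholder input path
lower = np.array([0, 120, 120])      # illustrative lower HSV bound
upper = np.array([10, 255, 255])     # illustrative upper HSV bound
if deteccion(lower, upper, tam_max_area=150):
    print('hot spot detected')
    cv2.waitKey(0)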
Example #2
def left_stop_at_black():
    cap = cv2.VideoCapture(0)
    time.sleep(1)
    GPIO.setmode (GPIO.BCM)
    GPIO.setwarnings (False)
    GPIO.setup (24, GPIO.OUT)
    GPIO.setup (27, GPIO.OUT)
    PWML1 = GPIO.PWM (24,3)
    PWMR = GPIO.PWM (27,3)
    PWML1.start(0)
    PWMR.start(0)
    counter = 0
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2,2))
    while True:
        ret, img = cap.read()
        PWMR.ChangeDutyCycle(100)
        PWML1.ChangeDutyCycle(100)
        
        ret,thresh = cv2.threshold(img,130,255,cv2.THRESH_BINARY)
        eroded = cv2.erode(thresh,kernel,iterations = 1)
        thresh = cv2.erode(eroded, kernel, iterations = 1)

        b1,g1,r1 = thresh[240,120]
        if b1==255:
            counter+=1
            continue
        if counter>0:
            PWML1.stop()
            PWMR.stop()
            break
            
    GPIO.cleanup()
    cap.release()
    return
Example #3
def segment(frame):
    """This Function Segments Swimmers from the image, and returns the same frame after the mask"""
    #Blur
    kernel = np.ones((5,5),np.uint8)
    finekernel = np.ones((3,3),np.uint8)
    # define range of blue color in HSV
    lower_blue = np.array([90,50,50])
    upper_blue = np.array([130,255,255])

    # Convert BGR to HSV
    hsvimg = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Threshold the HSV image to get only blue colors
    imgMask = cv2.inRange(hsvimg, lower_blue, upper_blue)

    #morphological opening (removes small objects from the foreground)
    #Erode and Expand the edges of the mask to eliminate small artifacts
    imgMask = cv2.erode(imgMask, kernel, iterations=1)
    imgMask = cv2.dilate(imgMask, kernel, iterations=1)

    #morphological closing
    #Erode and Expand the edges to smooth edges
    imgMask = cv2.dilate(imgMask, kernel, iterations=1)
    imgMask = cv2.erode(imgMask, kernel, iterations=1)

    #InvertMask
    imgMask = cv2.bitwise_not(imgMask,imgMask)
    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame,frame, mask= imgMask)

    return res
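A minimal usage sketch for segment() (hedged: the file name is a placeholder):

import cv2
import numpy as np

frame = cv2.imread('pool_frame.jpg')  # placeholder input path
masked = segment(frame)               # blue water suppressed, swimmers kept
cv2.imshow('segmented', masked)
cv2.waitKey(0)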
Example #4
def find_OFF_diods(image, dirname=None, filename=None):
    print "finding OFF"
    _, _, red = cv2.split(image)
    #if dirname and filename:
    #    save_image("red_%s" % filename, dirname, red)
    
    element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    red_eq = cv2.equalizeHist(red)
    val, thr = cv2.threshold(red_eq, 110, 255, cv.CV_THRESH_BINARY)
    thr_er = cv2.erode(thr, element, iterations=3)

    #if dirname and filename:
    #    save_image("off_thr_%s" % filename, dirname, thr)
    
    #save_image("thr_%s" % filename, dirname, thr)
    di = cv2.dilate(thr, element, iterations=10)
    er = cv2.erode(di, element, iterations=12)
    #if dirname and filename:
    #    save_image("off_erode_%s" % filename, dirname, er)
    
    contours, _ = cv2.findContours(er, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

    diod_contours = find_diods_contours(contours)
    #for d_c in diod_contours:
    #    cv2.circle(image, d_c[0], d_c[1], cv.RGB(255,0,0), 10)

    return [d[0] for d in diod_contours]
Example #5
def track_green(frame):
    #Convert the current frame to HSV
    #hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    #Define the threshold for finding a blue object with hsv
    lower_blue = np.array([130,170,100])
    upper_blue = np.array([190,230,160])

    #Create a binary image, where anything blue appears white and everything else is black
    mask = cv2.inRange(frame, lower_blue, upper_blue)

    #Get rid of background noise using erosion and fill in the holes using dilation and erode the final image on last time
    element = cv2.getStructuringElement(cv2.MORPH_RECT,(2,2))
    mask = cv2.erode(mask,element, iterations=2)
    mask = cv2.dilate(mask,element,iterations=2)
    mask = cv2.erode(mask,element)
    
    #Create Contours for all blue objects
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    maximumArea = 0
    bestContour = None
    for contour in contours:
        currentArea = cv2.contourArea(contour)
        if currentArea > maximumArea:
            bestContour = contour
            maximumArea = currentArea
    #Create a bounding box around the biggest blue object
    [x,y,w,h] = [0,0,0,0]
    if bestContour is not None:
        x,y,w,h = cv2.boundingRect(bestContour)
        location = [(x+w)/2, (y+h)/2]
        print "orange: " + str(location)
        urllib.urlopen("http://54.218.43.192/robot_tag/2/add_coords/" + str(location[0]).zfill(3) + str(location[1]).zfill(3) + "/")
    return [x,y,w,h]
Example #6
    def find_stones(self):
        '''
        Finds stones in thresholded black and white images.
        Results stored in self.black and self.white.
        '''
        # Smooth, aligned gray image
        sag = cv2.bilateralFilter(self.alignedgray, 3, 20, 20)

        kernel = np.ones((3,3),np.uint8)
        
        #mask1 = cv2.inRange(sag, 0, 85)
        mask1 = cv2.inRange(sag, 100, 255)
        mask2 = cv2.inRange(sag, 140, 255)

        self.blackrange = cv2.bitwise_not(sag)
        ret, self.blackrange = cv2.threshold(self.blackrange, 210, 255, 0)
        self.blackrange = cv2.erode(self.blackrange,kernel,iterations=3)
        self.blackrange = cv2.dilate(self.blackrange,kernel,iterations=3)

        #self.whiterange = cv2.bitwise_and(sag, sag, mask=mask2)
        ret, self.whiterange = cv2.threshold(sag, 135, 255, 0)
        self.whiterange = cv2.erode(self.whiterange,kernel,iterations=2)
        self.whiterange = cv2.dilate(self.whiterange,kernel,iterations=2)

        self.black = cv2.HoughCircles(self.blackrange,cv2.HOUGH_GRADIENT,1,16,param1=30,param2=7,minRadius=4,maxRadius=13)[0] 
        self.white = cv2.HoughCircles(self.whiterange,cv2.HOUGH_GRADIENT,1,16,param1=30,param2=7,minRadius=4,maxRadius=13)[0]
Example #7
  def classifyFrame(self):
    if not self.depthRosImg or not self.rgbRosImg:
      return None

    origImg = or_util.toCvImg(self.rgbRosImg, 'bgr8')
    depthImg = or_util.toCvImg(self.depthRosImg, 'passthrough')

    denoised = cv2.GaussianBlur(origImg, (2*self.blurSize+1, 2*self.blurSize+1), self.sigmaX / 10.)

    structuringElement = img_manip.getStructuringElement(cv2.MORPH_ELLIPSE, self.morphSize)
    morphed = denoised
    morphed = cv2.dilate(morphed, structuringElement)
    morphed = cv2.erode(morphed, structuringElement)
    morphed = cv2.erode(morphed, structuringElement)
    morphed = cv2.dilate(morphed, structuringElement)

    hsvImg = cv2.cvtColor(morphed, cv2.COLOR_BGR2HSV)
    detector = cv2.SimpleBlobDetector(self.blobParams)
    keypoints = detector.detect(hsvImg)

    objects = []
    for kp in keypoints:
      classes = self.classifyOne(depthImg, origImg, hsvImg, kp)
      objects.append(classes)

    self.tracker.feed(keypoints, map(lambda obj: OBJECTS_DICT[obj], objects))
    trackerKeypoints, trackerVotes = self.tracker.voteAll()
    trackerObjects = map(lambda vote: OBJECTS_LIST[vote], trackerVotes)

    out = drawStuff(morphed, trackerKeypoints, trackerObjects)
    if self.show:
      cv2.imshow(WINDOW_NAME, out)
      cv2.waitKey(1)

    return trackerObjects, out
Example #8
    def findTag(self):

        # Take each frame
        _, frame = self.cap.read()
       
        #lower_blue = np.array([142, 160,120])
        #upper_blue = np.array([179,240,200])
        upper_blue = self.upper
        lower_blue = self.lower
        # Convert BGR to HSV
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # Threshold the HSV image to get only red colors
        mask = cv2.inRange(hsv, lower_blue, upper_blue)
        element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
        # Opening (erode then dilate) to remove speckle noise
        mask = cv2.erode(mask, element)
        mask = cv2.dilate(mask, element)

        # Closing (dilate then erode) to fill small holes
        mask = cv2.dilate(mask, element)
        mask = cv2.erode(mask, element)
        
        _, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        xVals = []
        for cnt in contours:
            
            M = cv2.moments(cnt)
            marea = int(M['m00'])
            if(marea > 100):
                mx = int(M['m10']/M['m00'])
                xVals.append(mx)
            
        return xVals
Example #9
def find_boxes(img):
    """
    Detects box(square) shapes in the input image.
    :param img: input image.
    :return: image with outlines of boxes from the original image.
    """
    kernel_length = np.array(img).shape[1] // 75
    verticle_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, kernel_length))
    hori_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_length, 1))
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))

    # Detect vertical and horizontal lines in the image
    img_temp1 = cv2.erode(img, verticle_kernel, iterations=2)
    verticle_lines_img = cv2.dilate(img_temp1, verticle_kernel, iterations=2)
    img_temp2 = cv2.erode(img, hori_kernel, iterations=2)
    horizontal_lines_img = cv2.dilate(img_temp2, hori_kernel, iterations=2)

    # Weighting parameters: these decide how much each image contributes to the combined image.
    alpha = 0.5
    beta = 1.0 - alpha
    # Add the vertical and horizontal lines images to get a third image as summation.
    img_final_bin = cv2.addWeighted(verticle_lines_img, alpha, horizontal_lines_img, beta, 0.0)
    img_final_bin = cv2.erode(~img_final_bin, kernel, iterations=2)
    (_, img_final_bin) = cv2.threshold(img_final_bin, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    return img_final_bin
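A usage sketch for find_boxes() (hedged: as in Example #37 below, the input is assumed to be a binarized, inverted image with lines shown white on black; the paths are placeholders):

import cv2

img = cv2.imread('form.png', 0)  # placeholder path, read as grayscale
(thresh, img_bin) = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
img_bin = 255 - img_bin          # invert so lines become white
boxes = find_boxes(img_bin)
cv2.imwrite('box_outlines.png', boxes)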
Example #10
    def trackRobot(self, imagePath):
        '''this function track the robot and return its coordinates'''
        img = cv2.imread(imagePath)
        img = cv2.flip(img, 1)
        img = cv2.flip(img, 0)

        # convert into hsv 
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

        # Find mask that matches 
        green_mask = cv2.inRange(hsv, np.array((50., 30., 0.)), np.array((100., 255., 255.)))
        green_mask = cv2.erode(green_mask, None, iterations=2)
        green_mask = cv2.dilate(green_mask, None, iterations=2)

        green_cnts = cv2.findContours(green_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
        green_c = max(green_cnts, key=cv2.contourArea)

        # fit an ellipse and use its orientation to gain info about the robot
        green_ellipse = cv2.fitEllipse(green_c)

        # This is the position of the robot
        green_center = (int(green_ellipse[0][0]), int(green_ellipse[0][1]))

        red_mask = cv2.inRange(hsv, np.array((0., 100., 100.)), np.array((80., 255., 255.)))
        red_mask = cv2.erode(red_mask, None, iterations=2)
        red_mask = cv2.erode(red_mask, None, iterations=2)

        red_cnts = cv2.findContours(red_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
        red_c = max(red_cnts, key=cv2.contourArea)

        red_ellipse = cv2.fitEllipse(red_c)
        red_center = (int(red_ellipse[0][0]), int(red_ellipse[0][1]))


        return green_center, red_center   
Example #11
    def find_position ( self, frame ):
        pos_x = self.last_x
        pos_y = self.last_y

        hsv_img = cv2.cvtColor( frame, cv2.COLOR_BGR2HSV )
        mask1 = cv2.inRange( hsv_img, self.lower_red_l, self.lower_red_h )
        mask2 = cv2.inRange( hsv_img, self.upper_red_l, self.upper_red_h )
       
        mask = mask1 + mask2       

        mask = cv2.erode( mask, self.kernel )
        mask = cv2.dilate( mask, self.kernel )

        mask = cv2.dilate( mask, self.kernel ) 
        mask = cv2.erode( mask, self.kernel )

        o_moments = cv2.moments( mask )
        d_m01 = o_moments['m01']
        d_m10 = o_moments['m10']
        d_area = o_moments['m00']
        print "MOMENTS " + str(d_m01) + " " + str(d_m10) + " " + str(d_area)

        if d_area > 10000:
            pos_x = int(d_m10 / d_area)
            pos_y = int(d_m01 / d_area)
            print "[" + str(pos_x) + " , " + str(pos_y) + "]"
        return pos_x, pos_y 
Example #12
def detect(capture, prev_images):
    # Capture a new frame
    new_frame = capture.grab_frame()

    # Not enough frames: no detection, just store this one
    if len(prev_images) < 2:
        return None, None, new_frame, None

    # Everything to grayscale
    prev_images = [prev_images[1], prev_images[0], new_frame]
    prev_images = [cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
                   for prev_frame in prev_images]
    prev_frame, current_frame, next_frame = prev_images

    # Diff
    d1 = cv2.absdiff(prev_frame, next_frame)
    d2 = cv2.absdiff(next_frame, current_frame)
    motion = cv2.bitwise_and(d1, d2)

    # Threshold & erode
    cv2.threshold(motion, config.DIFF_THRESHOLD, 255, cv2.THRESH_BINARY,
                  dst=motion)
    cv2.erode(motion, kernel_ero, dst=motion)

    # Find and count changes
    number_of_changes, location, std_dev = detect_motion(motion)

    return number_of_changes, std_dev, new_frame, location
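detect() relies on a module-level erosion kernel and a config module that the excerpt does not show; a plausible sketch of those definitions (the values are illustrative assumptions, not the project's):

import numpy as np

kernel_ero = np.ones((4, 4), np.uint8)  # small kernel to erode away noise pixels

class config:                           # stand-in for the real config module
    DIFF_THRESHOLD = 35                 # binarization threshold for the frame diff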
Example #13
    def process(self, image):
        foreground = getfg(image)
        element = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
        cv2.erode(foreground, element, foreground, iterations=1)
        cv2.dilate(foreground, element, foreground, iterations=1)

        return foreground
Example #14
    def get_corners(self, img):
        """ img: np.array, -> np.array

        This function will find corners. It does:
            dilate with cross
            erode with diamond
            dilate with X
            erode with square
            Corners are obtained by taking the absolute difference of the two closed images
        """
        img_1 = cv2.dilate(img, self._kernel_cross)
        print('cross dilate')
        # ava.cv.utl.show_image_wait_2(img_1) # ------------------

        img_1 = cv2.erode(img_1, self._kernel_diamond)
        print('erode diamond')
        # ava.cv.utl.show_image_wait_2(img_1) # ------------------

        img_2 = cv2.dilate(img, self._kernel_x)
        print('x dilate')
        # ava.cv.utl.show_image_wait_2(img_2) # ------------------

        img_2 = cv2.erode(img_2, self._kernel_5x5)
        print('erode square')
        # ava.cv.utl.show_image_wait_2(img_2) # ------------------

        img_1 = cv2.absdiff(img_2,img_1)
        #threshold
        img_1 = self.apply_threshold(img_1)

        return img_1
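The self._kernel_* structuring elements are defined outside this excerpt. A plausible sketch of 5x5 elements matching the docstring's cross / diamond / X / square shapes (an assumption, not the project's actual values):

import numpy as np

_kernel_cross = np.array([[0,0,1,0,0],
                          [0,0,1,0,0],
                          [1,1,1,1,1],
                          [0,0,1,0,0],
                          [0,0,1,0,0]], dtype=np.uint8)
_kernel_diamond = np.array([[0,0,1,0,0],
                            [0,1,1,1,0],
                            [1,1,1,1,1],
                            [0,1,1,1,0],
                            [0,0,1,0,0]], dtype=np.uint8)
_kernel_x = np.array([[1,0,0,0,1],
                      [0,1,0,1,0],
                      [0,0,1,0,0],
                      [0,1,0,1,0],
                      [1,0,0,0,1]], dtype=np.uint8)
_kernel_5x5 = np.ones((5, 5), dtype=np.uint8)  # the square element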
Example #15
    def get_border_mask(self):
        outer_erosion = cv2.erode(self._shred_mask, self._erode_kernel,
                                  iterations=self._erode_iterations_outer)
        inner_erosion = cv2.erode(self._shred_mask, self._erode_kernel,
                                  iterations=self._erode_iterations_inner)

        return outer_erosion - inner_erosion
Example #16
    def erode(self, size=(5, 5), iterations=1, binary_in=False):
        """
        morphological erode
        Parameters
        ----------
        size : 'tuple'
            kernel size
        iterations : 'int'
            number of times to run kernel
        binary_in : 'bool'
            run on the binary image

        Returns
        -------
        self.img_b : 'array'
            new binary array
        self.img : 'array'
            new img array

        """
        kernel = np.ones(size, np.uint8)

        if binary_in:
            erosion = cv2.erode(self.img_b, kernel, iterations=iterations)
            self.img_b = erosion
        else:
            erosion = cv2.erode(self.img, kernel, iterations=iterations)
            self.img = erosion
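The method is a thin wrapper around cv2.erode with an all-ones kernel; the standalone equivalent of a call like erode(size=(5, 5), iterations=1) is simply (path is a placeholder):

import cv2
import numpy as np

img = cv2.imread('mask.png', 0)                # placeholder input
kernel = np.ones((5, 5), np.uint8)             # same kernel the method builds
eroded = cv2.erode(img, kernel, iterations=1)  # what the wrapper stores back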
Example #17
    def motionDetected(self, new_frame):
        frame = self.preprocessInputFrame(new_frame)

        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        gray = cv.GaussianBlur(gray, (11, 11), 0)

        if self.prevPrevFrame is None:
            self.prevPrevFrame = gray
            return False

        if self.prevFrame is None:
            self.prevFrame = gray
            return False

        cv.normalize(gray, gray, 0, 255, cv.NORM_MINMAX)

        frameDiff = self.diffImg(self.prevPrevFrame, self.prevFrame, gray)
        ret1, th1 = cv.threshold(frameDiff, 10, 255, cv.THRESH_BINARY)

        th1 = cv.dilate(th1, None, iterations=15)
        th1 = cv.erode(th1, None, iterations=1)

        delta_count = cv.countNonZero(th1)

        cv.imshow("frame_th1", th1)

        self.prevPrevFrame = self.prevFrame
        self.prevFrame = gray

        ret = delta_count > self.threshold

        if ret:
            self.updateMotionDetectionDts()

        return ret
Example #18
 def detectRover(self, argFrame):
     frame    = self.frame
     hsvFrame = self.frame
     thresh   = self.frame[:,:,0]
     rGreen = (38,67,155,198,0,255)
     rPink = (165,182,155,192,0,255)
     hsvFrame  = cv2.cvtColor(self.frame.copy(), cv2.COLOR_BGR2HSV)
     thresh = cv2.inRange(hsvFrame.copy(),np.array([rGreen[0],rGreen[2],rGreen[4]]),np.array([rGreen[1],rGreen[3],rGreen[5]]))
     thresh = cv2.medianBlur(thresh.copy(),5)
     thresh = cv2.erode(thresh.copy(), erodeElem)
     #thresh = cv2.erode(thresh.copy(), erodeElem)
     thresh = cv2.dilate(thresh.copy(), dilateElem)
     thresh = cv2.dilate(thresh.copy(), dilateElem)
     _, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
     if len(contours) != 1:
         return -1
     (x,y,w,h) = cv2.boundingRect(contours[0])
     greenPt = (int((x+x+w)/2),int((y+y+h)/2))
     thresh = cv2.inRange(hsvFrame.copy(),np.array([rPink[0],rPink[2],rPink[4]]),np.array([rPink[1],rPink[3],rPink[5]]))
     thresh = cv2.medianBlur(thresh.copy(),5)
     thresh = cv2.erode(thresh.copy(), erodeElem)
     #thresh = cv2.erode(thresh.copy(), erodeElem)
     thresh = cv2.dilate(thresh.copy(), dilateElem)
     thresh = cv2.dilate(thresh.copy(), dilateElem)
     _, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
     if len(contours) != 1:
         return -1
     (x,y,w,h) = cv2.boundingRect(contours[0])
     pinkPt = (int((x+x+w)/2),int((y+y+h)/2))
     self.roverPos = (int((greenPt[0]+pinkPt[0])/2),int((greenPt[1]+pinkPt[1])/2))
     angle = getAngle(pinkPt[0],pinkPt[1],greenPt[0],greenPt[1])
     self.roverHeading = 360+angle[2]*-1
     return greenPt, pinkPt
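detectRover() uses module-level erodeElem and dilateElem that the excerpt omits; a plausible sketch (the element sizes are illustrative assumptions):

import cv2

erodeElem = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
dilateElem = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))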
Example #19
    def skin_blobs(self, img, det_face_hsv, face_rect, masked_img):
        """
        Do blob morphology stuff on faces. Perform a mask,
        Then dilate and erode to make them into more coherent blobs.

        :param img: BGR image from webcam
        :param det_face_hsv: hsv image of the face from the previous detection
        :param face_rect: non-normalized dimensions of face rectangle (left, top, cols, rows)
        :param masked_img: skin-masked image to clean up
        :return: 2D array, black and white image of skin blobs
        """

        #open and close
        # kernel size and shape are more art than science
        # using a small kernel to erode noise and a large on to
        # to dilate since I have more false negatives with skin
        # detection than I do false positives.
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
        kernel_small = kernel & np.transpose(kernel) #symmetry
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (6, 6))
        kernel_big = kernel & np.transpose(kernel) #symmetry
        blob_img = cv2.erode(masked_img, kernel_small)
        blob_img = cv2.dilate(blob_img, kernel_big)
        blob_img = cv2.erode(blob_img, kernel_small)
        blob_img = cv2.dilate(blob_img, kernel_big)
        return blob_img
Example #20
    def _findCorner(self):
        """
        Find the corners by using some morphological operator and then use
        the connectedComponents in order to isolate them.
        """
        # a few morphological operator in order to find corners
        self.img_corner = cv2.dilate(self.img, CheckboxConstant.cross)
        self.img_corner = cv2.erode(self.img_corner, CheckboxConstant.diamond)
        temp = cv2.dilate(self.img, CheckboxConstant.xelem)
        temp = cv2.erode(temp, CheckboxConstant.rect)
        self.img_corner = cv2.absdiff(temp, self.img_corner)

        # threshold
        ret, self.img_corner = cv2.threshold(255-self.img_corner, 190, 255, 0)
        # find the different area
        temp = connectedComponents(self.img_corner, connectivity=8)
        N = np.max(temp)
        # loop over each region except background
        for n in range(1, N):
            # average position of the corner
            index = np.array((0, 0), int)
            count = 0
            for i in range(temp.shape[0]):
                for j in range(temp.shape[1]):
                    if temp[i, j] == n:
                        index += (i, j)
                        count += 1
            if count != 0:
                index = index // count  # integer mean position of the region
                self.corners.append((index[0], index[1]))
Example #21
def get_mask_YCbCr(im, mask_YCbCr):
    minH = 80;    maxH = 220 
    minS = 65;    maxS = 220 
    minV = 65;    maxV = 220 

    tmp = cv2.cvtColor(im, cv.CV_BGR2YCrCb) # BGR → YCrCb

    p_src = cv2.split(tmp) # channels of the converted image (tmp here; the original im would do for a modified-HSV variant)
    p_dst = cv2.split(tmp) # channels to be modified for the mask

    # Channel bounds applied as with plain (unmodified) HSV
    H = p_src[0]    # ranges from 0 to 180
    S = p_src[1]
    V = p_src[2] 

    p_dst[0] = 255*(minH <= H)*(H <= maxH)*(minS <= S)*(S <= maxS)*(minV <= V)*(V <= maxV)
    p_dst[1] = 255*(minH <= H)*(H <= maxH)*(minS <= S)*(S <= maxS)*(minV <= V)*(V <= maxV)
    p_dst[2] = 255*(minH <= H)*(H <= maxH)*(minS <= S)*(S <= maxS)*(minV <= V)*(V <= maxV)
    
    mask_YCbCr[:,:,0] = p_dst[0]; mask_YCbCr[:,:,1] = p_dst[1]; mask_YCbCr[:,:,2] = p_dst[2];

    # Remove fine noise
    element = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
    mask_YCbCr[:] = cv2.dilate(np.uint8(mask_YCbCr), element)
    mask_YCbCr[:] = cv2.erode(np.uint8(mask_YCbCr), element)
Example #22
def ErodeTrick(im):
    # Morphological opening then closing with the default 3x3 kernel
    im = cv2.erode(im, None, iterations=3)
    im = cv2.dilate(im, None, iterations=3)

    im = cv2.dilate(im, None, iterations=3)
    im = cv2.erode(im, None, iterations=3)
    return im
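The same open-then-close sequence can be written with cv2.morphologyEx, which for MORPH_OPEN applies N erosions followed by N dilations (and the reverse for MORPH_CLOSE); a sketch of the equivalent:

import cv2
import numpy as np

def erode_trick_equivalent(im, iterations=3):
    kernel = np.ones((3, 3), np.uint8)  # same as cv2.erode's default element
    opened = cv2.morphologyEx(im, cv2.MORPH_OPEN, kernel, iterations=iterations)
    return cv2.morphologyEx(opened, cv2.MORPH_CLOSE, kernel, iterations=iterations)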
Example #23
    def process_rgb(self, rgb_img):
        frame_gray = cv2.cvtColor(rgb_img, cv.CV_RGB2GRAY)
        # gray_blurred = cv2.GaussianBlur(frame_gray, (9, 9), 0)
        gray_blurred = cv2.medianBlur(frame_gray, 5)
        # gray_blurred = cv2.bilateralFilter(frame_gray, 8, 16, 4)
        # cv2.imshow("gray_blurred", gray_blurred)


        gray_filter = cv2.adaptiveThreshold(gray_blurred,
                                            255.0,
                                            # cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C,
                                            cv.CV_ADAPTIVE_THRESH_MEAN_C,
                                            cv.CV_THRESH_BINARY,
                                            9,  # neighbourhood
                                            9)
        cv2.bitwise_not(gray_filter, gray_filter)
        kernel = np.ones((3, 3), 'uint8')
        # gray_erode = gray_filter
        gray_erode = cv2.erode(gray_filter, kernel)

        kernel2 = np.ones((5, 5), 'uint8')

        gray_erode = cv2.dilate(gray_erode, kernel2)
        gray_erode = cv2.erode(gray_erode, kernel)

        size = rgb_img.shape
        size = (size[1] - 1, size[0] - 1)

        cv2.rectangle(gray_erode, (0, 0), size,
                      0,   # color
                      20,  # thickness
                      8,   # line type (8-connected)
                      0)   # shift (fractional bits)

        return gray_erode
Example #24
def cropLines(crop1, crop2):
    kernel = np.ones((11,11),np.uint8)
    grayCrop1 = cv2.cvtColor(crop1, cv2.COLOR_BGR2GRAY)
    grayCrop1 = cv2.GaussianBlur(grayCrop1, (111, 111), 1)
    grayCrop1 = cv2.erode(grayCrop1, kernel, iterations=1)
    cannyImg = cv2.Canny(grayCrop1, 100, 200)
    lines = cv2.HoughLinesP(cannyImg, 1, np.pi/180,
                            threshold = 5,
                            minLineLength = 300, maxLineGap = 70)
    cv2.imshow("bang",cannyImg)
    cv2.waitKey(0)
    for lineSet in lines:
        for line in lineSet:
            cv2.line(crop1, (line[0], line[1]), (line[2], line[3]), (255, 255, 0))

    grayCrop2 = cv2.cvtColor(crop2, cv2.COLOR_BGR2GRAY)
    grayCrop2 = cv2.GaussianBlur(grayCrop2, (111, 111), 1)
    grayCrop2 = cv2.erode(grayCrop2, kernel, iterations=1)
    cannyImg2 = cv2.Canny(grayCrop2, 100, 200)

    lines2 = cv2.HoughLinesP(cannyImg2, 1, np.pi/180,
                            threshold = 5,
                            minLineLength = 300, maxLineGap = 70)

    for lineSet2 in lines2:
        for line2 in lineSet2:
            cv2.line(crop2, (line2[0], line2[1]), (line2[2], line2[3]), (255, 255, 0))

    cv2.imshow("crop1", crop1)
    cv2.imshow("crop2", crop2)
    cv2.waitKey(0)
    return lines, lines2
Example #25
    def traceBin(self, imgBin, color):
        #tiny erosion reduces color overlap and 
        #gets rid of tiny points that can occur on middle layers
        imgBin = cv2.erode(imgBin,kernel5)

        size = np.shape(imgBin)

        #pad a border around the binary image. This will allow the erosions to
        #erode away from the edge of the canvas
        imgBinPadded = zeros((size[0]+2, size[1]+2), dtype=np.uint8)
        imgBinPadded[1:-1,1:-1] = imgBin

        while True:
            bmp = potrace.Bitmap(imgBinPadded)    #bitmap in preparation for potrace
            path = bmp.trace()  #trace it
            if (path.curves == []): #check for blank
                break
            for curve in path:
                #tessellate aka break into line segments
                #yes, their function name is misspelled
                tessellation = curve.tesselate() #uses the default 'adaptive' interpolation
                #and now put coords into the array
                if (color == 0):
                    self.addArray0(tessellation)
                elif (color == 1):
                    self.addArray1(tessellation)
                else:
                    self.addArray2(tessellation)
            
            imgBinPadded = cv2.erode(imgBinPadded, kernel9) #go one layer deeper
Example #26
    def find_yellow_contours(self, split_image):
        lab_bthreshed_board = cv2.adaptiveThreshold(split_image.lab[2], 255, 
            cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 
            self.options["board_blocksize"], self.options["board_C_b"])
        yuv_uthreshed_board = cv2.adaptiveThreshold(split_image.yuv[2], 255, 
            cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 
            self.options["board_blocksize"], self.options["board_C_u"])
        yuv_uthreshed_board = cv2.bitwise_not(yuv_uthreshed_board)
        finalThreshed = lab_bthreshed_board & yuv_uthreshed_board

        # Erode and dilate thresholded images
        morph_size = self.options["board_morph_size"]
        erode_element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (morph_size * 2 + 1, morph_size * 2 + 1),
                                                  (morph_size, morph_size))
        dilate_element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (morph_size * 2 + 1, morph_size * 2 + 1),
                                                   (morph_size, morph_size))
        eroded = cv2.erode(finalThreshed,erode_element, iterations = self.options["board_morph_iter"])
        finalThreshed = cv2.dilate(eroded, dilate_element, iterations = self.options["board_morph_iter"])
        finalThreshed = cv2.dilate(finalThreshed, dilate_element, iterations = self.options["board_morph_iter"])
        finalThreshed = cv2.erode(finalThreshed, erode_element, iterations = self.options["board_morph_iter"])

        self.post_if_enabled('yellow_lab_bthreshed', lab_bthreshed_board)
        self.post_if_enabled('yellow_yuv_uthreshed', yuv_uthreshed_board)
        self.post_if_enabled('yellow_binary_image', finalThreshed)

        _, contours, hierarchy = cv2.findContours(np.copy(finalThreshed), 
            cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        return contours, hierarchy
Example #27
    def find_red_position ( frame ):
        pos_x = last_x
        pos_y = last_y

        hsv_img = cv2.cvtColor( frame, cv2.COLOR_BGR2HSV )
        red_mask1 = cv2.inRange( hsv_img, lower_red_l, lower_red_h )
        red_mask2 = cv2.inRange( hsv_img, upper_red_l, upper_red_h )
       
        red_mask = red_mask1 + red_mask2       

        red_mask = cv2.erode( red_mask, kernel )
        red_mask = cv2.dilate( red_mask, kernel )

        red_mask = cv2.dilate( red_mask, kernel ) 
        red_mask = cv2.erode( red_mask, kernel )

        o_moments = cv2.moments( red_mask )
        d_m01 = o_moments['m01']
        d_m10 = o_moments['m10']
        d_area = o_moments['m00']
        #print "MOMENTS " + str(d_m01) + " " + str(d_m10) + " " + str(d_area)

        if d_area > 10000:
            pos_x = int(d_m10 / d_area)
            pos_y = int(d_m01 / d_area)
            #print "[" + str(pos_x) + " , " + str(pos_y) + "]"
        return pos_x, pos_y, red_mask
Example #28
    def postProcessImg(self, image, tag):
        # Cleans up remaining noise
        cv2.erode(image, self.ERODE_KERNEL, image)
        # cv2.dilate(image, self.DILATE_KERNEL, image)
        # self.autoRotate(image, tag)

        cv2.imwrite("processed" + tag + ".jpg", image)
Example #29
def segment_on_dt(a, img, gray):
    border = cv2.dilate(img, None, iterations=5)
    border = border - cv2.erode(border, None)


    dt = cv2.distanceTransform(img,cv2.DIST_L2,5)
    plt.subplot(3,3,4)
    plt.imshow(dt),plt.title('dt'),plt.xticks([]), plt.yticks([])

    dt = ((dt - dt.min()) / (dt.max() - dt.min()) * 255).astype(np.uint8)
    _, dt2 = cv2.threshold(dt, 0, 255, cv2.THRESH_BINARY)
    dt2 = cv2.erode(dt2, None, iterations=2)
    # dt2 = cv2.adaptiveThreshold(dt, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 5, 2)
    # dt1 = peak_local_max(gray, indices=False, min_distance=10, labels=img, threshold_abs=5)
    # dt2 = peak_local_max(dt, indices=False, min_distance=5, labels=img, threshold_abs=0)
    lbl, ncc = label(dt2)

    plt.subplot(3,3,5)
    plt.imshow(dt2),plt.title('localMax'),plt.xticks([]), plt.yticks([])
    # plt.subplot(3,3,6)
    # plt.imshow(ncc),plt.title('ncc'),plt.xticks([]), plt.yticks([])

    lbl = lbl * (255/ncc)
    # Completing the markers now.
    lbl[border == 255] = 255

    lbl = lbl.astype(np.int32)
    cv2.watershed(a, lbl)

    lbl[lbl == -1] = 0
    lbl = lbl.astype(np.uint8)

    plt.subplot(3,3,6)
    plt.imshow(lbl),plt.title('lbl_out'),plt.xticks([]), plt.yticks([])
    return 255 - lbl
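A hedged driver for segment_on_dt() showing how its three arguments are typically prepared for this watershed-over-distance-transform pattern (the path and thresholding choices are illustrative assumptions):

import cv2

a = cv2.imread('coins.jpg')                 # placeholder color input
gray = cv2.cvtColor(a, cv2.COLOR_BGR2GRAY)
_, bw = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
result = segment_on_dt(a, bw, gray)         # 255 minus the watershed labels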
Example #30
def obtain_cand(Initial,Initial2,Nsme,user,action,Total):
    TT = Initial[1].copy()
    Rg = Initial[0].copy()
    Output = []
    kernel = np.ones((7, 7), np.uint8)
    Mask2 = cv2.dilate(Initial2[1][:,:,0], kernel, 1)
    kernel = np.ones((4, 4), np.uint8)
    Mask2 = cv2.erode(Mask2, kernel, 1)
    Mask2 = cv2.bitwise_not(Mask2)
    kernel = np.ones((7, 7), np.uint8)
    Mask1 = cv2.dilate(Initial2[0][:,:,0], kernel, 1)
    kernel = np.ones((4, 4), np.uint8)
    Mask1 = cv2.erode(Mask1, kernel, 1)
    Mask1 = cv2.bitwise_not(Mask1)
    Rg1 = cv2.bitwise_and(Rg,Rg,mask=Mask1)
    Sup1 = cv2.bitwise_and(Initial[1],Initial[1],mask=Mask2)
    Sup = cv2.cvtColor(Sup1, cv2.COLOR_BGR2RGB)
    segments_fz = slic(Sup, n_segments=250, compactness=20, sigma=5)
    segments_fz[Mask2 < 1] = -1
    segments_fz += 2
    # Img_Slic = label2rgb(segments_fz, Sup, kind='avg')
    # Img_Slic_TT = cv2.cvtColor(Img_Slic, cv2.COLOR_RGB2BGR)
    # Img_Slic = cv2.cvtColor(Img_Slic, cv2.COLOR_RGB2BGR)
    for i in xrange(len(Total)):
        col = [random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)]
        T= Total[i][0][0]
        x,y,x2,y2 = T[0],T[1],T[2],T[3]
        cv2.rectangle(Rg1, (T[4], T[5]), (T[6],T[7]), col, 2)
        P1 = Obj_segment.Rect.Point(T[4], T[5])
        P2 = Obj_segment.Rect.Point(T[6],T[7])
        Rec_top = Obj_segment.Rect.Rect(P1,P2)
        sp = np.array(segments_fz[y:y2,x:x2])
        sp = np.unique(sp)
        if len(sp) == 0:
            # Output =Img_Slic[y:y2,x:x2]
            P1 = Obj_segment.Rect.Point(x,y)
            P2 = Obj_segment.Rect.Point(x2,y2)
            rec = Obj_segment.Rect.Rect(P1,P2)
        elif sp[0] ==[1] and len(sp)==1:
            # Output = Img_Slic[y:y2, x:x2]
            P1 = Obj_segment.Rect.Point(x, y)
            P2 = Obj_segment.Rect.Point(x2, y2)
            rec = Obj_segment.Rect.Rect(P1, P2)
        else:
            rmin, rmax,cmin, cmax = bbox2(segments_fz, sp,(x,y),(x2,y2))
            if rmin is None:
                continue
            # Output = TT[cmin:cmax,rmin:rmax]
            P1 = Obj_segment.Rect.Point(rmin, cmin)
            P2 = Obj_segment.Rect.Point(rmax, cmax)
            rec = Obj_segment.Rect.Rect(P1, P2)
        Ouput_Top = Rg[T[5]:T[7],T[4]:T[6]]
        Output.append((rec,Rec_top))
        # cv2.imwrite("Morphed/Patches_Front/"+user+"_"+action+"_"+Nsme[:-4]+"_"+i.__str__()+"_Front.jpg",Output)
        # cv2.imwrite("Morphed/Patches_Top/" + user + "_" + action + "_" + Nsme[:-4] + "_" + i.__str__() + "_Top.jpg", Ouput_Top)
        # cv2.rectangle(Img_Slic_TT,(x,y),(x2,y2),col,3)
    # cv2.imwrite("Morphed/Top/" + user + "_" + action + "_" + Nsme[:-4] + "_v2" + "_Top.jpg", Rg1)
    # cv2.imwrite("Morphed/Front/"+user+"_"+action+"_"+Nsme[:-4]+"_v2"+ "_Front.jpg",Img_Slic_TT)
    return Output
Example #31
    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if frame is None:
        break

    # resize the frame, blur it, and convert it to the HSV
    # color space
    frame = imutils.resize(frame, width=1000)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)

    # construct a mask for the color "green", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    center = None

    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
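        # The excerpt is truncated here; a hedged sketch of the step the
        # comments above describe: minimum enclosing circle plus moments
        # centroid of the largest contour.
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        if radius > 10:
            cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)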
Example #32
def CCT_extract1(img,N,R):
         
    #List that holds the decoding results
    CodeTable=[]
    '''
    image.shape[0]: image height in pixels
    image.shape[1]: image width in pixels
    image.shape[2]: number of image channels
    '''
    img_shape=img.shape
    img_height=img_shape[0]
    img_width=img_shape[1]
    
#    print('img_width=',img_width)
#    print('img_height=',img_height)

    #Convert the input image to grayscale
    img_gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

    #Adaptive binarization of the image using Otsu's method
    retval,img_bina=cv2.threshold(img_gray,0,1,cv2.THRESH_BINARY+cv2.THRESH_OTSU)

    #Extract contours from the binarized image; the third argument selects how contour points are stored (all points are returned here to simplify later filtering)
    contours, hierarchy = cv2.findContours(img_bina,cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE)  
    #cv2.drawContours(img,contours,-1,(0,0,255),1) 
    #Iterate over the extracted contours and keep the elliptical ones
    for contour in contours:
        area=cv2.contourArea(contour,False)
        length=cv2.arcLength(contour,True)
        #Compute the circularity of the contour
        R0=2*math.sqrt(math.pi*area)/(length+1)
        if R0<R:                
            continue
        if len(contour)<20:                
            continue
        #Draw this contour on the original image
#            cv2.drawContours(img,contour,-1,(0,0,255),2)
#            print('det_r=',det_r)
#            print('len(contour)=',len(contour))
        #Convert the contour point set to a numpy array
        e_points=np.array(contour) 
        #Fitted ellipse parameters: center coordinates, size, rotation angle
        box1=cv2.fitEllipse(e_points)        
        #print('box1:',box1)       
        box2=tuple([box1[0],tuple([box1[1][0]*2,box1[1][1]*2]),box1[2]])
        box3=tuple([box1[0],tuple([box1[1][0]*3,box1[1][1]*3]),box1[2]])          
        #Get the four vertices (clockwise) of the minimal bounding rectangle of the outermost ellipse
        minRect = cv2.boxPoints(box3)     
        #Compute the major axis of the ellipse
        a=max(box3[1][0],box3[1][1])
        s=1.33333*a    
        #Crop the region containing the CCT from the original image
        cct_roi=None
        row_min=round(box1[0][1]-s/2)
        row_max=round(box1[0][1]+s/2)
        col_min=round(box1[0][0]-s/2)
        col_max=round(box1[0][0]+s/2)
#            print('Check whether this ROI exceeds the image bounds......')
#            print([row_min,row_max,col_min,col_max])
        #Check whether cct_roi exceeds the bounds of the original image
        if row_min>=0 and row_max<=img_height and col_min>=0 and col_max<=img_width:
            #Cut cct_roi out of the original image            
            cct_roi=img[row_min:row_max,col_min:col_max]                   
            #Offset of cct_roi relative to the original image
            dx=box1[0][0]-s/2
            dy=box1[0][1]-s/2            
            #Apply an affine transform to the elliptical CCT region to turn it into a circle
            src=np.float32([[minRect[0][0]-dx,minRect[0][1]-dy],[minRect[1][0]-dx,minRect[1][1]-dy],
                            [minRect[2][0]-dx,minRect[2][1]-dy],[minRect[3][0]-dx,minRect[3][1]-dy],
                            [box1[0][0]-dx,box1[0][1]-dy]])
            dst=np.float32([[box1[0][0]-a/2-dx,box1[0][1]-a/2-dy],[box1[0][0]+a/2-dx,box1[0][1]-a/2-dy],
                            [box1[0][0]+a/2-dx,box1[0][1]+a/2-dy],[box1[0][0]-a/2-dx,box1[0][1]+a/2-dy],
                            [box1[0][0]-dx,box1[0][1]-dy]])        
            #Get the affine transformation matrix
            #M=cv2.getAffineTransform(src,dst)
            M=my_getAffineTransform(src,dst)
            if isinstance(M,int):
                continue
            #Compute the center coordinates after the affine transform
            X0,Y0=PointAffineTransform(M,[box1[0][0]-dx,box1[0][1]-dy])
            #print('X0=',X0,'  ','Y0=',Y0)
            CCT_img=None
            #Apply the affine transform to cct_roi
            cct_roi_size=np.shape(cct_roi)
            if cct_roi_size[0]>0 and cct_roi_size[1]>0:
                CCT_img=cv2.warpAffine(cct_roi,M,(round(s),round(s)))
            #print('cct img shape=',np.shape(CCT_img))
            #Scale the affine-transformed CCT
            CCT_large = cv2.resize(CCT_img, (0, 0), fx=200.0/s, fy=200.0/s, interpolation=cv2.INTER_LANCZOS4)                            
            #Convert the enlarged CCT to grayscale
            CCT_gray=cv2.cvtColor(CCT_large,cv2.COLOR_BGR2GRAY)
            #Adaptive binarization of the grayscale image
            retval,CCT_bina=cv2.threshold(CCT_gray,0,1,cv2.THRESH_BINARY+cv2.THRESH_OTSU)            
            kernel=cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
            #Erode the binary image
            CCT_eroded=cv2.erode(CCT_bina,kernel)
            #plt.imshow(CCT_bina)
            #plt.show()
            #Decide whether this region contains a CCT
            if CCT_or_not(CCT_eroded):
#                print('len(contour)=',len(contour))
#                print('a=',box3[1][0]/3)
#                print('b=',box3[1][1]/3)
#                print('s=',s)
#                print('R0=',R0)
                #Call the decoding function to decode the CCT
                code=CCT_Decode(CCT_eroded,N)
                CodeTable.append([code,box1[0][0],box1[0][1]])
#                print([code,box1[0][0],box1[0][1]])
                #Draw the decoded value on the original image. Arguments: image, text, top-left corner, font, font scale, color, thickness
                cv2.putText(img,str(code),(int(box3[0][0]-0.25*s),int(box1[0][1]+0.5*s)),cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
                #Draw the fitted ellipses
                cv2.ellipse(img,box1,(0,255,0),1)
                cv2.ellipse(img,box2,(0,255,0),1)
                cv2.ellipse(img,box3,(0,255,0),1)   
    return CodeTable,img
Example #33
def processing(path, geo_path, depth_path, density_path, info_path, save_path,
               nid):
    with open(path, 'rb') as f:
        img = pickle.load(f)
    #img = cv2.imread(depth_path, 0)
    #img = np.array(img)
    rows, columns = img.shape
    #depth_image = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    #depth_image = cv2.cvtColor(depth_image,cv2.COLOR_BGR2GRAY)
    with open(info_path, 'rb') as f:
        info = pickle.load(f)
    ref_plane = info['ref_plane']
    buttom_edge = info['buttom_edge']
    left_edge = info['left_edge']
    horizontal, vertical = region_divide(nid, img, depth_path, ref_plane,
                                         save_path)
    geo_img_h = Image.open(geo_path)
    geo_img_v = geo_img_h.copy()

    density_image = cv2.imread(density_path, 0)
    bg = img.min()

    #img = signal.medfilt2d(img, (5,5))
    global_avg = _calculate_average(img,
                                    bg,
                                    modality=config.modality_reference_average)
    averages, _ = reference_averages(
        img,
        density_image,
        horizontal,
        vertical,
        global_avg,
        bg,
        modality=config.modality_reference_average)

    blue_img = np.zeros((rows, columns, 3), dtype=np.uint8)
    blue_img[:, :, 0] = 255
    red_img = np.zeros((rows, columns, 3), dtype=np.uint8)
    red_img[:, :, 2] = 255
    reference_image = cv2.imread(depth_path, 1)
    reference_image_2 = reference_image.copy()
    tmp_averages = np.expand_dims(averages + bias, axis=2).repeat(3, axis=2)
    tmp_img = np.expand_dims(img, axis=2).repeat(3, axis=2)
    reference_image = np.where((tmp_img - tmp_averages) > bias, red_img,
                               reference_image)
    reference_image = np.where(
        np.abs(tmp_img - tmp_averages) < bias, blue_img, reference_image)

    for c in vertical:
        cv2.line(reference_image, (c, 0), (c, rows), (255, 255, 255), 2, 4)
    for h in horizontal:
        cv2.line(reference_image, (0, h), (columns, h), (255, 255, 255), 2, 4)

    horizontal, vertical = refine_divide(reference_image, horizontal, vertical,
                                         nid, save_path)
    averages, avg_density = reference_averages(
        img,
        density_image,
        horizontal,
        vertical,
        global_avg,
        bg,
        modality=config.modality_reference_average)
    tmp_averages = np.expand_dims(averages + bias, axis=2).repeat(3, axis=2)
    tmp_img = np.expand_dims(img, axis=2).repeat(3, axis=2)
    reference_image_2 = np.where((tmp_img - tmp_averages) > bias, red_img,
                                 reference_image_2)
    reference_image_2 = np.where(
        np.abs(tmp_img - tmp_averages) < bias, blue_img, reference_image_2)

    for c in vertical:
        cv2.line(reference_image_2, (c, 0), (c, rows), (255, 255, 255), 2, 4)
    for h in horizontal:
        cv2.line(reference_image_2, (0, h), (columns, h), (255, 255, 255), 2,
                 4)

    scann_width = 5
    sub_lines_h, sub_lines_v, bg_h, bg_v = find_lines(img, averages,
                                                      avg_density, scann_width)

    im_cv_contour = cv2.imread(geo_path, 1)
    im_inner_rect_cand = im_cv_contour.copy()
    im_inner_rect_pred = im_cv_contour.copy()

    scann_line_img_h = np.zeros((rows, columns), dtype=np.uint8)
    scann_line_img_v = scann_line_img_h.copy()
    scann_line_bg = scann_line_img_h.copy()

    draw = ImageDraw.Draw(geo_img_h)
    draw_vertical = ImageDraw.Draw(geo_img_v)

    for s, e, h in sub_lines_h:
        #if (e-s)*0.02 < 0.3:  # filter out scann line that less than 0.3m
        #    continue
        if (e - s) * 0.02 > 6.0:
            bg_h.append([s, e, h])
            continue
        scann_line_img_h[h, s:e + scann_width] = 255
        draw.line((s, h) + (e + scann_width, h), fill=255)

    for s, e, w in sub_lines_v:
        #if (e-s)*0.02 < 0.1:  # filter out scann line that less than 0.3m
        #    continue
        if (e - s) * 0.02 > 6.0:
            bg_v.append([s, e, w])
            continue
        scann_line_img_v[s:e + scann_width, w] = 255
        draw_vertical.line((w, s) + (w, e + scann_width), fill=255)

    for s, e, h in bg_h:
        scann_line_bg[h, s:e + scann_width] = 255
    for s, e, w in bg_v:
        scann_line_bg[s:e + scann_width, w] = 255

    scann_line_img_h = cv2.cvtColor(scann_line_img_h, cv2.COLOR_GRAY2BGR)
    gray = cv2.cvtColor(scann_line_img_h, cv2.COLOR_BGR2GRAY)
    ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)

    verticalStructure = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 5))
    vertical = cv2.erode(binary, verticalStructure)
    vertical = cv2.dilate(vertical, verticalStructure)

    # close holes to make it solid rectangle
    kernel = np.ones((5, 5), np.uint8)
    close_h = cv2.morphologyEx(vertical, cv2.MORPH_CLOSE, kernel)

    scann_line_img_v = cv2.cvtColor(scann_line_img_v, cv2.COLOR_GRAY2BGR)
    gray_v = cv2.cvtColor(scann_line_img_v, cv2.COLOR_BGR2GRAY)
    ret, binary = cv2.threshold(gray_v, 0, 255, cv2.THRESH_BINARY)

    hStructure = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 1))
    tmp = cv2.erode(binary, hStructure)
    tmp = cv2.dilate(tmp, hStructure)

    # close holes to make it solid rectangle
    kernel = np.ones((5, 5), np.uint8)
    close_v = cv2.morphologyEx(tmp, cv2.MORPH_CLOSE, kernel)

    scann_line_bg = cv2.cvtColor(scann_line_bg, cv2.COLOR_GRAY2BGR)
    gray = cv2.cvtColor(scann_line_bg, cv2.COLOR_BGR2GRAY)
    ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)

    verticalStructure = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    scann_line_bg = cv2.erode(binary, verticalStructure)
    scann_line_bg = cv2.dilate(scann_line_bg, verticalStructure)

    foreground_mask = 255 - scann_line_bg

    close = cv2.bitwise_and(close_v, close_h)
    close = cv2.bitwise_and(close, foreground_mask)

    _, contours, hierarchy = cv2.findContours(close, cv2.RETR_LIST,
                                              cv2.CHAIN_APPROX_NONE)
    mask = np.ones((rows, columns, 3), dtype="uint8")
    idx = 0
    candidates = list()
    rects = list()
    for c in contours:
        # compute the center of the contour

        M = cv2.moments(c)
        #x, y, w, h = cv2.boundingRect(c)
        x, y, w, h = cv2.boundingRect(c)
        # compensation of scann_height
        h = h + int(avg_density[y + h // 2, x + w // 2]) - 1

        leftup = (x, y)
        rightdown = (x + w, y + h)
        #leftup, rightdown = order_points(c.reshape(c.shape[0], 2))
        #w = rightdown[0] - leftup[0]
        #h = rightdown[1] - leftup[1]

        if (w < 12 or h <= 12) and (leftup[0] > rows - 100):  # w<20, h<12
            continue
        if (w < 20 or h <= 12) and (leftup[0] <= rows - 100):  # w<20, h<12
            continue
        #if w*h*0.02*0.02<0.1:
        #    continue
        if w * 0.02 > 6.0 or h * 0.02 > 4.0:
            continue
        if w / h >= 5 or w / h < 0.2:
            continue
        #print(w, h)
        rects.append([x, y, w, h])
        cX = int(M["m10"] / max(M["m00"], 0.0001))
        cY = int(M["m01"] / max(M["m00"], 0.0001))
        # draw the contour and center of the shape on the image
        cv2.drawContours(im_cv_contour, [c], -1, (255, 255, 255), 2)

        cv2.circle(im_cv_contour, (cX, cY), 3, (255, 255, 255), -1)

        candidates.append(
            np.array([[leftup[0], leftup[1], rightdown[0], rightdown[1]]]))
        cv2.rectangle(im_inner_rect_cand, leftup, rightdown, (0, 0, 255))

        cv2.drawContours(mask, [c], -1, (255, 255, 255), 2)
        cv2.rectangle(mask, leftup, rightdown, (0, 0, 255))
        cv2.circle(mask, (cX, cY), 3, (255, 255, 255), -1)
        cv2.putText(mask, "{}".format(idx), (cX - 10, cY - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
        idx += 1
        #cv2.imshow("Image", im_cv)
        #cv2.waitKey(0)
    if len(candidates) > 0:
        candidates = np.concatenate(candidates, axis=0)
    else:
        candidates = None
    labels = load_label(root, nid)
    if labels is not None:
        labels = utils.label_convert(labels, columns, rows)

    final_rects = candidate_process(img, avg_density, rects, foreground_mask)
    final_rects = utils.prediction_filter(final_rects, candidates)

    predictions = list()
    if final_rects is not None:
        for e in final_rects:
            x0, y0, x1, y1 = e

            leftup = (x0, y0)
            rightdown = (x1, y1)

            density = np.median(avg_density[y0:y1, x0:x1])
            w = x1 - x0
            h = y1 - y0
            #leftup, rightdown = order_points(c.reshape(c.shape[0], 2))
            #w = rightdown[0] - leftup[0]
            #h = rightdown[1] - leftup[1]

            if (w < 12 or h <= 12) and (leftup[0] > rows - 100):  # w<20, h<12
                continue
            if (w < 20 or h <= 12) and (leftup[0] <= rows - 100):  # w<20, h<12
                continue
            #if w*h*0.02*0.02<0.1:
            #    continue
            if w * 0.02 > 5.0 or h * 0.02 > 4.0:
                continue
            if w / h >= 5 or w / h < 0.2:
                continue

            predictions.append(
                np.array([[leftup[0], leftup[1], rightdown[0], rightdown[1]]]))

            cv2.rectangle(im_inner_rect_pred, leftup, rightdown, (0, 255, 0))

    if len(predictions) > 0:
        predictions = np.concatenate(predictions, axis=0)
        path_pred_txt = os.path.join(save_path, 'pred_txt')
        if not os.path.exists(path_pred_txt):
            os.makedirs(path_pred_txt)
        with open(os.path.join(path_pred_txt, '{}.txt'.format(nid)), 'w') as f:
            for i, e in enumerate(predictions):
                #f.write(str(cls_box_scores[i])+' '+ ' '.join(map(str, e))+'\n')
                f.write(' '.join(map(str, e)) + '\n')
    else:
        predictions = None

    #print(candidates[:, 0].max(), candidates[:, 1].max(),candidates[:, 2].max(), candidates[:, 3].max())
    #print(labels[:, 0].max(), labels[:, 1].max(),labels[:, 2].max(), labels[:, 3].max())

    tp, fp, fn = utils.confusion_matrix(predictions, labels)
    pred_height, actual_height = utils.height_error(predictions, labels)

    gdf = utils.window_line(predictions, img, averages, info_path, nid)
    path_window_geojson = os.path.join(save_path, 'window_geojson')
    if not os.path.exists(path_window_geojson):
        os.makedirs(path_window_geojson)

    path_window_ply = os.path.join(save_path, 'ply_detected_windows')
    if not os.path.exists(path_window_ply):
        os.makedirs(path_window_ply)
    #ply_path = '/Volumes/Qing Xiao/ikg/4_detection/part_dataset_v2/ply_in_depth/28_in_depth.ply'
    utils.window_line_cp(predictions, labels, averages + bias, info_path,
                         path_window_ply, nid)

    if not gdf.empty:
        gdf.to_file(os.path.join(path_window_geojson,
                                 '{}.geojson'.format(nid)),
                    driver='GeoJSON')

    #print("precision:", tp/(tp+fp))
    #print('recall:', tp/(tp+fn))
    path_image_tpfpfn = os.path.join(save_path, 'image_tpfpfn')
    if not os.path.exists(path_image_tpfpfn):
        os.makedirs(path_image_tpfpfn)
    draw_mis(geo_path, path_image_tpfpfn, predictions, labels, nid)

    path_scann_line_in_image_h = os.path.join(save_path,
                                              'scann_line_in_image_h')
    if not os.path.exists(path_scann_line_in_image_h):
        os.makedirs(path_scann_line_in_image_h)
    path_refer_mask_refine = os.path.join(save_path, 'refer_mask_refine')
    if not os.path.exists(path_refer_mask_refine):
        os.makedirs(path_refer_mask_refine)
    path_scann_line_h = os.path.join(save_path, 'scann_line_h')
    if not os.path.exists(path_scann_line_h):
        os.makedirs(path_scann_line_h)
    path_fg_mask = os.path.join(save_path, 'fg_mask')
    if not os.path.exists(path_fg_mask):
        os.makedirs(path_fg_mask)

    path_scann_line_v = os.path.join(save_path, 'scann_line_v')
    if not os.path.exists(path_scann_line_v):
        os.makedirs(path_scann_line_v)

    path_scann_line_in_image_v = os.path.join(save_path,
                                              'scann_line_in_image_v')
    if not os.path.exists(path_scann_line_in_image_v):
        os.makedirs(path_scann_line_in_image_v)

    path_morph = os.path.join(save_path, 'scann_line_morph')
    if not os.path.exists(path_morph):
        os.makedirs(path_morph)

    path_morph_h = os.path.join(save_path, 'scann_line_morph_h')
    if not os.path.exists(path_morph_h):
        os.makedirs(path_morph_h)
    path_morph_v = os.path.join(save_path, 'scann_line_morph_v')
    if not os.path.exists(path_morph_v):
        os.makedirs(path_morph_v)

    path_contours = os.path.join(save_path, 'contours')
    if not os.path.exists(path_contours):
        os.makedirs(path_contours)
    path_mask = os.path.join(save_path, 'mask')
    if not os.path.exists(path_mask):
        os.makedirs(path_mask)
    path_inner_rect_cand = os.path.join(save_path, 'inner_rect_cand')
    if not os.path.exists(path_inner_rect_cand):
        os.makedirs(path_inner_rect_cand)
    path_inner_rect_pred = os.path.join(save_path, 'inner_rect_pred')
    if not os.path.exists(path_inner_rect_pred):
        os.makedirs(path_inner_rect_pred)
    path_refer_mask = os.path.join(save_path, 'refer_mask')
    if not os.path.exists(path_refer_mask):
        os.makedirs(path_refer_mask)

    path_scann_line_dbscan = os.path.join(save_path, 'scann_line_dbscan')
    if not os.path.exists(path_scann_line_dbscan):
        os.makedirs(path_scann_line_dbscan)
    path_dbscan_rect = os.path.join(save_path, 'dbscan_rect')
    if not os.path.exists(path_dbscan_rect):
        os.makedirs(path_dbscan_rect)

    #cluster_img.save(os.path.join(path_scann_line_dbscan, '{}.png'.format(nid)))
    #cv2.imwrite(os.path.join(path_dbscan_rect, '{}.png'.format(nid)), im_outer_rect)
    cv2.imwrite(os.path.join(path_refer_mask_refine, '{}.png'.format(nid)),
                reference_image_2)
    cv2.imwrite(os.path.join(path_refer_mask, '{}.png'.format(nid)),
                reference_image)
    geo_img_h.save(
        os.path.join(path_scann_line_in_image_h, '{}.png'.format(nid)))
    geo_img_v.save(
        os.path.join(path_scann_line_in_image_v, '{}.png'.format(nid)))

    cv2.imwrite(os.path.join(path_scann_line_h, '{}.png'.format(nid)),
                scann_line_img_h)
    cv2.imwrite(os.path.join(path_fg_mask, '{}.png'.format(nid)),
                scann_line_bg)

    cv2.imwrite(os.path.join(path_morph, '{}.png'.format(nid)), close)
    cv2.imwrite(os.path.join(path_contours, '{}.png'.format(nid)),
                im_cv_contour)
    cv2.imwrite(os.path.join(path_inner_rect_cand, '{}.png'.format(nid)),
                im_inner_rect_cand)
    cv2.imwrite(os.path.join(path_inner_rect_pred, '{}.png'.format(nid)),
                im_inner_rect_pred)

    cv2.imwrite(os.path.join(path_mask, '{}.png'.format(nid)), mask)
    cv2.imwrite(os.path.join(path_scann_line_v, '{}.png'.format(nid)),
                scann_line_img_v)
    cv2.imwrite(os.path.join(path_morph_h, '{}.png'.format(nid)), close_h)
    cv2.imwrite(os.path.join(path_morph_v, '{}.png'.format(nid)), close_v)

    return tp, fp, fn, pred_height, actual_height, gdf
Example #34
def change1(val):
    pass


def change2(val):
    pass


# used to test canny threshold values
cv.createTrackbar("p1", "Canny", 0, 255, change1)
cv.createTrackbar("p2", "Canny", 0, 255, change2)

# blur -> erode -> dilate -> find edges with canny -> find circles with HoughCircles
blur = cv.GaussianBlur(hsv, (11, 11), 0)
erode = cv.erode(blur, kernel, 1)
dilate = cv.dilate(erode, kernel, 1)
edges = cv.Canny(dilate, 151, 155)
circles = cv.HoughCircles(edges,
                          cv.HOUGH_GRADIENT,
                          1,
                          20,
                          param1=30,
                          param2=15,
                          minRadius=5,
                          maxRadius=40)
res = img.copy()

# create a mask for each circle and use the mask to find the mean color inside of them
mean_list = []
for i in circles[0, :]:
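    # (hedged sketch) the loop body is cut off in this snippet; one plausible
    # completion following the comment above, assuming numpy is imported as np:
    x, y, r = int(i[0]), int(i[1]), int(i[2])
    circle_mask = np.zeros(img.shape[:2], dtype=np.uint8)
    cv.circle(circle_mask, (x, y), r, 255, -1)  # filled circle as the mask
    mean_list.append(cv.mean(res, mask=circle_mask))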
Example #35
0
    #frame = np.array(mask_img[:,:,:])
    #fakewebcam.schedule_frame(frame)
    #cv2.imwrite("output.jpg", frame)
    #break

    mask_img = tf.keras.preprocessing.image.img_to_array(mask_img,
                                                         dtype=np.uint8)

    if config["dilate"]:
        mask_img = cv2.dilate(mask_img,
                              np.ones((config["dilate"], config["dilate"]),
                                      np.uint8),
                              iterations=1)
    if config["erode"]:
        mask_img = cv2.erode(mask_img,
                             np.ones((config["erode"], config["erode"]),
                                     np.uint8),
                             iterations=1)
    if config["blur"]:
        mask_img = cv2.blur(mask_img, (config["blur"], config["blur"]))
    segmentationMask_inv = np.bitwise_not(mask_img)

    for c in range(3):
        frame[:,:,c] = frame[:,:,c] * (mask_img[:,:,0] / 255.) + \
            replacement_bgs[replacement_bgs_idx][:,:,c] * (1.0-(mask_img[:,:,0] / 255.))

    replacement_bgs_idx = (replacement_bgs_idx + 1) % len(replacement_bgs)

    if config.get("debug_show_mask", False):
        frame = np.array(mask_img[:, :, :])
    fakewebcam.schedule_frame(frame)
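    # (hedged aside) the per-channel blend above can be written as one
    # vectorized expression, assuming mask_img keeps a single channel last:
    # alpha = mask_img[:, :, :1] / 255.0
    # bg = replacement_bgs[replacement_bgs_idx]
    # frame = (frame * alpha + bg * (1.0 - alpha)).astype(np.uint8)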
Example #36
0
index = 0
# Load the video
camera = cv2.VideoCapture(0)

# Keep looping
while True:
    # Grab the current paintWindow
    (grabbed, frame) = camera.read()
    frame = cv2.flip(frame, 1)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Determine which pixels fall within the blue boundaries and then blur the binary image
    blueMask = cv2.inRange(hsv, blueLower, blueUpper)
    blueMask = cv2.erode(blueMask, kernel, iterations=2)
    blueMask = cv2.morphologyEx(blueMask, cv2.MORPH_OPEN, kernel)
    blueMask = cv2.dilate(blueMask, kernel, iterations=1)

    # Find contours (bottle cap in my case) in the image
    (_, cnts, _) = cv2.findContours(blueMask.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
    center = None

    # Check to see if any contours were found
    if len(cnts) > 0:
        # Sort the contours and find the largest one -- we
        # will assume this contour corresponds to the area of the bottle cap
        cnt = sorted(cnts, key = cv2.contourArea, reverse = True)[0]
        # Get the radius of the enclosing circle around the found contour
        ((x, y), radius) = cv2.minEnclosingCircle(cnt)
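        # (hedged sketch) a typical continuation, not part of this snippet:
        # draw the enclosing circle and take the centroid from image moments.
        M = cv2.moments(cnt)
        if M["m00"] != 0:
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)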
Example #37
0
def extract_cords(imgPath):
    cords_array = []
    # Read the image
    img = cv2.imread(imgPath, 0)

    # Thresholding the image
    (thresh, img_bin) = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # Invert the image
    img_bin = 255-img_bin 
    cv2.imwrite("Image_bin.jpg",img_bin)
    # Defining a kernel length
    kernel_length = np.array(img).shape[1]//80

    # A vertical kernel of (1 x kernel_length), which will detect all the vertical lines in the image.
    verticle_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, kernel_length))
    # A horizontal kernel of (kernel_length x 1), which will detect all the horizontal lines in the image.
    hori_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_length, 1))
    # A kernel of (3 X 3) ones.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    # Morphological operation to detect vertical lines from an image
    img_temp1 = cv2.erode(img_bin, verticle_kernel, iterations=3)
    verticle_lines_img = cv2.dilate(img_temp1, verticle_kernel, iterations=3)
#     cv2.imwrite("verticle_lines.jpg",verticle_lines_img)
    # Morphological operation to detect horizontal lines from an image
    img_temp2 = cv2.erode(img_bin, hori_kernel, iterations=3)
    horizontal_lines_img = cv2.dilate(img_temp2, hori_kernel, iterations=3)
#     cv2.imwrite("horizontal_lines.jpg",horizontal_lines_img)
    # Weighting parameters; these decide how much of each image is blended into the new image.
    alpha = 0.5
    beta = 1.0 - alpha
    # cv2.addWeighted blends the two images with the given weights to produce a third image.
    img_final_bin = cv2.addWeighted(verticle_lines_img, alpha, horizontal_lines_img, beta, 0.0)
    img_final_bin = cv2.erode(~img_final_bin, kernel, iterations=2)
    (thresh, img_final_bin) = cv2.threshold(img_final_bin, 128,255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
#     cv2.imwrite("img_final_bin.jpg",img_final_bin)
    # Find contours for image, which will detect all the boxes
    im2, contours, hierarchy = cv2.findContours(img_final_bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Sort all the contours by top to bottom.
    (contours, boundingBoxes) = sort_contours(contours, method="top-to-bottom")
    idx = 0
    for c in contours:
        # Returns the location and width,height for every contour
        x, y, w, h = cv2.boundingRect(c)
    #     print(x, y, w, h)
        if (w > 1000 and h > 800):
            idx += 1
#             HardCoding for testing, will remove later
            cords_array.append(((y+150)/5, (x-70)/7, (y+h)/7, (x+w+100)/7))
#             new_img = img[y:y+h, x:x+w]
    #         print(new_img)
    #         cv2.imwrite(os.path.join("cropped/",str(idx)) + '.png', new_img)
    #         PILimage = Image.fromarray(new_img)
    #         PILimage.save(os.path.join("cropped/",str(idx)) + '.png', dpi=(200,200))
    # # If the box height is greater than 20 and the width is greater than 80, save it as a box in the "cropped/" folder.
        if (w > 800 and h > 500) and w>3*h:
            idx += 1
            new_img = img[y:y+h, x:x+w]
#             cv2.imwrite(os.path.join("cropped/",str(idx)) + '.png', new_img)
            PILimage = Image.fromarray(new_img)
            PILimage.save(os.path.join("cropped/",str(idx)) + '.png', dpi=(200,200))
    
    return cords_array
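# A hedged sketch of the sort_contours helper referenced above (it is not
# defined in this snippet); this follows the common imutils-style approach.
import cv2

def sort_contours(cnts, method="left-to-right"):
    reverse = method in ("right-to-left", "bottom-to-top")
    # sort on y for top/bottom orderings, on x otherwise
    i = 1 if method in ("top-to-bottom", "bottom-to-top") else 0
    boundingBoxes = [cv2.boundingRect(c) for c in cnts]
    (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
                                        key=lambda b: b[1][i],
                                        reverse=reverse))
    return (cnts, boundingBoxes)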
Example #38
0
def main():

    global new_image

    rs_img = rs_process()
    rospy.init_node('hand_tracking', anonymous=True)
    rospy.loginfo("Hand Detection Start!")

    #Marker Publisher Initialize
    pub = rospy.Publisher('/hand_marker', Marker, queue_size=1)
    hand_mark = MarkerGenerator()
    hand_mark.type = Marker.SPHERE_LIST
    hand_mark.scale = [.07] * 3
    hand_mark.frame_id = "/camera_color_optical_frame"
    hand_mark.id = 0
    hand_mark.lifetime = 10000

    #hand detect args
    parser = argparse.ArgumentParser()
    parser.add_argument('-sth',
                        '--scorethreshold',
                        dest='score_thresh',
                        type=float,
                        default=0.5,
                        help='Score threshold for displaying bounding boxes')
    parser.add_argument('-fps',
                        '--fps',
                        dest='fps',
                        type=int,
                        default=1,
                        help='Show FPS on detection/display visualization')
    parser.add_argument('-src',
                        '--source',
                        dest='video_source',
                        default=0,
                        help='Device index of the camera.')
    parser.add_argument('-wd',
                        '--width',
                        dest='width',
                        type=int,
                        default=640,
                        help='Width of the frames in the video stream.')
    parser.add_argument('-ht',
                        '--height',
                        dest='height',
                        type=int,
                        default=360,
                        help='Height of the frames in the video stream.')
    parser.add_argument(
        '-ds',
        '--display',
        dest='display',
        type=int,
        default=0,
        help='Display the detected images using OpenCV. This reduces FPS')
    parser.add_argument('-num-w',
                        '--num-workers',
                        dest='num_workers',
                        type=int,
                        default=4,
                        help='Number of workers.')
    parser.add_argument('-q-size',
                        '--queue-size',
                        dest='queue_size',
                        type=int,
                        default=5,
                        help='Size of the queue.')
    args = parser.parse_args()
    num_hands_detect = 2

    im_width, im_height = (args.width, args.height)

    #time for fps calculation
    start_time = datetime.datetime.now()
    num_frames = 0

    #skin filter color
    lower = np.array([0, 48, 80], dtype="uint8")
    upper = np.array([20, 255, 255], dtype="uint8")

    #######################################
    #Define the frame to transform
    #######################################
    target_frame = "/camera_color_optical_frame"  ######FROM
    reference_frame = "/base_link"  ####TO

    #####################################
    #Define the numpy array to record the consequences of the hand location
    ######################################
    # hand_pos = np.empty((1,3))

    is_transform_target = False

    if (is_transform_target):
        listener = tf.TransformListener()
        listener.waitForTransform(reference_frame, target_frame, rospy.Time(0),
                                  rospy.Duration(4.0))
        hand_mark.frame_id = reference_frame
    else:
        hand_mark.frame_id = target_frame

    hand_pose_list = []
    while not rospy.is_shutdown():
        #get rgb,depth frames for synchronized frames
        if not new_image:
            continue

        im_rgb = rs_image_rgb
        # im_rgb = cv2.cvtColor(rs_image_rgb, cv2.COLOR_BGR2RGB)
        im_depth = rs_image_depth
        new_image = False
        #add check

        # depth_map = np.array(rs_image_depth, dtype=np.uint8)
        depth_map = cv2.applyColorMap(
            cv2.convertScaleAbs(rs_image_depth, alpha=0.03), cv2.COLORMAP_JET)
        # cv2.imshow("Depth Image", depth_map)
        cv2.imshow("rs_image_rgb", rs_image_rgb)

        try:
            image_np = im_rgb
        except:
            print("Error converting to RGB")

        # actual hand detection
        boxes, scores = detector_utils.detect_objects(image_np,
                                                      detection_graph, sess)
        # draw bounding boxes
        detector_utils.draw_box_on_image(num_hands_detect, args.score_thresh,
                                         scores, boxes, im_width, im_height,
                                         image_np)

        if (scores[0] > args.score_thresh):
            (left, right, top,
             bottom) = (boxes[0][1] * im_width, boxes[0][3] * im_width,
                        boxes[0][0] * im_height, boxes[0][2] * im_height)
            p1 = (int(left), int(top))
            p2 = (int(right), int(bottom))
            # print p1,p2,int(left),int(top),int(right),int(bottom)
            image_hand = image_np[int(top):int(bottom), int(left):int(right)]
            # cv2.namedWindow("hand", cv2.WINDOW_NORMAL)
            cv2.imshow('hand', cv2.cvtColor(image_hand, cv2.COLOR_RGB2BGR))

            align_hand = im_rgb[int(top):int(bottom), int(left):int(right)]
            align_depth = depth_map[int(top):int(bottom), int(left):int(right)]
            align_hand_detect = np.hstack((align_hand, align_depth))
            # cv2.namedWindow('align hand', cv2.WINDOW_AUTOSIZE)
            # cv2.imshow('align hand', align_hand_detect)

            # cv2.imshow('align_hand_bgr', align_hand)
            align_hand = cv2.cvtColor(align_hand, cv2.COLOR_BGR2RGB)
            #skin filtering
            converted = cv2.cvtColor(align_hand, cv2.COLOR_BGR2HSV)
            skinMask = cv2.inRange(converted, lower, upper)
            # cv2.imshow("skinMask", skinMask)

            # apply a series of erosions and dilations to the mask
            # using an elliptical kernel
            # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
            # skinMask = cv2.erode(skinMask, kernel, iterations = 2)
            # skinMask = cv2.dilate(skinMask, kernel, iterations = 2)

            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
            skinMask = cv2.erode(skinMask, kernel, iterations=3)
            skinMask = cv2.dilate(skinMask, kernel, iterations=3)

            # blur the mask to help remove noise, then apply the
            # mask to the frame
            skinMask = cv2.GaussianBlur(skinMask, (3, 3), 0)
            skin = cv2.bitwise_and(align_hand, align_hand, mask=skinMask)
            # show the skin in the image along with the mask
            # cv2.imshow("images", np.hstack([align_hand, skin]))
            #end skin

            depth_pixel = [(int(left) + int(right)) / 2,
                           (int(top) + int(bottom)) / 2]
            # depth_point = [0.0,0.0,0.0]
            depth_point = rs.rs2_deproject_pixel_to_point(
                depth_intrin, depth_pixel,
                im_depth[depth_pixel[1], depth_pixel[0]] * depth_scale)
            print(depth_point)
            hand_mark.counter = 0
            t = rospy.get_time()
            hand_mark.color = [0, 1, 0, 1]

            # hand_mark.id = hand_mark.id + 1
            # if (hand_mark.id > 100000) :
            #     hand_mark.id = 0
            # ## hand in /camera_color_optical_frame
            # print ('id ',hand_mark.id)
            m0 = hand_mark.marker(points=[(depth_point[0], depth_point[1],
                                           depth_point[2])])

            hand_point_x = depth_point[0]
            hand_point_y = depth_point[1]
            hand_point_z = depth_point[2]

            if (is_transform_target):
                #########################################################################
                ##convert /camera_color_optical_frame => /world
                #########################################################################

                #transform position from target_frame to reference frame
                target_ref_camera = PointStamped()
                target_ref_camera.header.frame_id = target_frame
                target_ref_camera.header.stamp = rospy.Time(0)
                target_ref_camera.point = m0.points[0]  # the original referenced m, which is undefined at this point

                p = listener.transformPoint(reference_frame, target_ref_camera)
                # p=listener.transformPoint(reference_frame,hand_mark)

                m = hand_mark.marker(points=[(p.point.x, p.point.y,
                                              p.point.z)])

                #substitute data for the variable
                hand_point_x = p.point.x
                hand_point_y = p.point.y
                hand_point_z = p.point.z

                # pub.publish(m)

                #offset z-axiz
                # hand_mark.id = 1
                # m = hand_mark.marker(points= [(p.point.x, p.point.y, p.point.z + 0.10)])
                # pub.publish(m)
            else:
                # print('published!')
                ####append the data

                if 0.15 <= hand_point_z <= 0.75 and -0.4 <= hand_point_x <= 0.4:
                    print("recorded hand point")
                    hand_pose = [
                        hand_point_x, hand_point_y, hand_point_z, 0.0, 0.0,
                        0.0, 1.0
                    ]
                    print(hand_pose)
                    hand_pose_list.append(hand_pose)

                pub.publish(m0)

                #substitute data for the variable
                hand_point_x = depth_point[0]
                hand_point_y = depth_point[1] - 0.08
                hand_point_z = depth_point[2]

                #### offset z-axiz
                # hand_mark.id = 1
                m1 = hand_mark.marker(points=[(depth_point[0],
                                               depth_point[1] - 0.08,
                                               depth_point[2])])
                # m = hand_mark.marker(points= [(p.point.x, p.point.y, p.point.z + 0.10)])
                # pub.publish(m1)

        # Calculate Frames per second (FPS)
        num_frames += 1
        elapsed_time = (datetime.datetime.now() - start_time).total_seconds()
        fps = num_frames / elapsed_time

        #display window
        if (args.display > 0):
            # Display FPS on frame
            if (args.fps > 0):
                detector_utils.draw_fps_on_image("FPS : " + str(float(fps)),
                                                 image_np)

            cv2.imshow('Single Threaded Detection',
                       cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR))
        else:
            print("frames processed: ", num_frames, "elapsed time: ",
                  elapsed_time, "fps: ", str(int(fps)))

        if cv2.waitKey(10) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break

    print('save hand_pub.npy')
    # np.save('./hand_pub.npy',hand_pos)
    np.save("hand_pub", hand_pose_list)
Example #39
0
import numpy as np
import cv2 as cv

import matplotlib.pyplot as plt

img = cv.imread('smarties.png', 0)
_, mask = cv.threshold(img, 200, 255, cv.THRESH_BINARY_INV)

kernal = np.ones((2, 2), np.uint8)
dialation = cv.dilate(mask, kernal, iterations=4)
erossion = cv.erode(mask, kernal, iterations=4)

# Erosion followed by dilation.
opening = cv.morphologyEx(mask, cv.MORPH_OPEN, kernal)

# Dilation and then erosion.
closing = cv.morphologyEx(mask, cv.MORPH_CLOSE, kernal)
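# (hedged aside) the same call pattern also gives the morphological gradient,
# i.e. dilation minus erosion, which outlines the blobs:
# gradient = cv.morphologyEx(mask, cv.MORPH_GRADIENT, kernal)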

titles = ['image', 'mask', 'dialation', 'erosion', 'opening', 'closing']
images = [img, mask, dialation, erossion, opening, closing]
for i in range(len(images)):

    plt.subplot(2, 3, i + 1), plt.imshow(images[i], 'gray')
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])

plt.show()

cv.waitKey(0)
cv.destroyAllWindows()
Example #40
0
cap = cv2.VideoCapture(0)
upper = np.array([120,136,255])
lower = np.array([80,36,141])
while True:
    ret, frame = cap.read()
    w, h, _ = frame.shape
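    # note: frame.shape is (rows, cols, channels), so w here is the image
    # height and h the width; the names are swapped relative to convention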
    
    w_c = int(w/2)
    h_c = int(h/2)
    
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    
    k = np.ones((3,3))
    
    mask = cv2.inRange(frame,lower,upper)
    eroded = cv2.erode(mask,k,iterations = 1)
    dilated = cv2.dilate(eroded,k,iterations = 3)
    try:
        _, contours, _ = cv2.findContours(dilated,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
        cnt_max = max(contours,key = cv2.contourArea)
        rect = cv2.minAreaRect(cnt_max)
        box = cv2.boxPoints(rect)
        #return 4*2 np vertex
        box = np.int0(box)
        
        cv2.drawContours(frame,[box],0,(0,0,255),2)
    
        f = 600 #pixel
        A = int( rect[0][1])*-1 + w_c
        B = int( rect[0][0]) - h_c
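        # (hedged aside) with the pinhole model implied by f above (in pixels),
        # the pixel offsets A and B map to view angles via the arctangent:
        # angle_vertical = math.atan(A / f); angle_horizontal = math.atan(B / f)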
        
Example #41
0
def Game():

    # parameter settings
    bgModel = None
    cap_region_x_begin = 0.5  # start point/total width
    cap_region_y_end = 0.8  # start point/total width

    threshold = 60  # binary threshold (makes the silhouette distinct)

    blurValue = 41  # GaussianBlur kernel size (smooths the picture)

    bgSubThreshold = 50

    learningRate = 0

    # variables

    isBgCaptured = 0  # bool, whether the background captured

    triggerSwitch = False  # if true, the keyboard simulator works
    print("press 'b' to capture your background.")
    print("press 'n' to capture your gesture.")

    # Camera

    camera = cv2.VideoCapture(0)

    camera.set(10, 200)  # property id 10 is CAP_PROP_BRIGHTNESS

    cv2.namedWindow('trackbar')

    cv2.createTrackbar('trh1', 'trackbar', threshold, 100, printThreshold)

    while camera.isOpened():  # capture and convert image

        ret, frame = camera.read()

        threshold = cv2.getTrackbarPos('trh1', 'trackbar')

        frame = cv2.bilateralFilter(frame, 5, 50, 100)  # smoothing filter

        frame = cv2.flip(frame, 1)  # flip the frame horizontally

        cv2.rectangle(frame, (int(cap_region_x_begin * frame.shape[1]), 0),
                      (frame.shape[1], int(cap_region_y_end * frame.shape[0])),
                      (255, 0, 0), 2)

        cv2.imshow('original', frame)

        #  Main operation

        if isBgCaptured == 1:  #  background  is captured

            fgmask = bgModel.apply(frame, learningRate=learningRate)

            # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

            # res = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)

            kernel = np.ones((3, 3), np.uint8)

            fgmask = cv2.erode(fgmask, kernel, iterations=1)

            img = cv2.bitwise_and(frame, frame, mask=fgmask)

            img = img[0:int(cap_region_y_end * frame.shape[0]),
                      int(cap_region_x_begin *
                          frame.shape[1]):frame.shape[1]]  # clip the ROI

            cv2.imshow('mask', img)

            # convert the image into binary image

            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            blur = cv2.GaussianBlur(gray, (blurValue, blurValue), 0)

            cv2.imshow('blur', blur)

            ret, thresh = cv2.threshold(blur, threshold, 255,
                                        cv2.THRESH_BINARY)

            cv2.imshow('ori', thresh)

            # get the coutours

            thresh1 = copy.deepcopy(thresh)

            contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)

            length = len(contours)

            maxArea = -1

            if length > 0:

                for i in range(
                        length
                ):  # find the biggest contour (according to area)

                    temp = contours[i]

                    area = cv2.contourArea(temp)

                    if area > maxArea:
                        maxArea = area

                        ci = i

                res = contours[ci]

                hull = cv2.convexHull(res)

                drawing = np.zeros(img.shape, np.uint8)

                cv2.drawContours(drawing, [res], 0, (0, 255, 0), 2)

                cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)

                hull = cv2.convexHull(
                    res, returnPoints=False
                )  # return the point index in the contour

                Flag = True
                if len(hull) > 3:

                    defects = cv2.convexityDefects(res,
                                                   hull)  # finding defects

                    if defects is not None:  # avoid crashing (BUG not found)

                        cnt = 0

                        for i in range(
                                defects.shape[0]):  # calculate the angle

                            s, e, f, d = defects[i][0]

                            start = tuple(res[s][0])

                            end = tuple(res[e][0])

                            far = tuple(res[f][0])

                            a = math.sqrt((end[0] - start[0])**2 +
                                          (end[1] - start[1])**2)

                            b = math.sqrt((far[0] - start[0])**2 +
                                          (far[1] - start[1])**2)

                            c = math.sqrt((end[0] - far[0])**2 +
                                          (end[1] - far[1])**2)

                            angle = math.acos((b**2 + c**2 - a**2) /
                                              (2 * b * c))  # cosine theorem

                            if angle <= math.pi / 2:  # angle less than 90 degree, treat as fingers

                                cnt += 1

                                cv2.circle(drawing, far, 8, [211, 84, 0], -1)

                        isFinishCal, Flag = True, False
                if Flag:
                    isFinishCal, cnt = False, 0

                if triggerSwitch is True:

                    if isFinishCal is True and cnt <= 5:
                        #To determine what the player gesture represents
                        if cnt == 0:
                            print("stone")
                            camera.release()
                            cv2.destroyAllWindows()
                            break
                        elif cnt == 1:
                            print("scissors")
                            camera.release()
                            cv2.destroyAllWindows()
                            break
                        elif cnt == 4:
                            #Change the value of cnt for easy sorting later
                            cnt = 2
                            print("paper")
                            camera.release()
                            cv2.destroyAllWindows()
                            break

            cv2.imshow('output',
                       drawing)  # drawing the contour of one's gesture

        # Keyboard OP

        k = cv2.waitKey(10)

        if k == 27:  # press ESC to exit

            camera.release()

            cv2.destroyAllWindows()

            break

        elif k == ord('b'):  # press 'b' to capture the background

            bgModel = cv2.createBackgroundSubtractorMOG2(0, bgSubThreshold)

            isBgCaptured = 1

            print('!!!Background Captured!!!')

        elif k == ord('r'):  # press 'r' to reset the background

            bgModel = None

            triggerSwitch = False

            isBgCaptured = 0

            print('!!!Reset BackGround!!!')

        elif k == ord('n'):  # press 'n' to count the number

            triggerSwitch = True

            print('!!!Trigger On!!!')
    play = ['rock', 'scissors', 'paper']
    p1 = cnt
    pc = random.randint(0, 2)
    # print p1,' ',pc,'\n'
    print("you are ", play[p1], ",and the computer is ", play[pc], "\n")
    #to judge the winner of the game.
    if (p1 == pc):
        print("Game Draw\n")
        game_result = 1
    elif ((p1 == 0 and pc == 1) or (p1 == 1 and pc == 2)
            or (p1 == 2 and pc == 0)):
        print('you win!\n')
        game_result = 1
    else:
        print('you lose!\n')
        game_result = -1
    return game_result
Example #42
0
        red = cv2.dilate(red_mask, kernal)
        red1 = cv2.bitwise_and(frame, frame, mask=red_mask)

        contours, ret = cv2.findContours(red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        for pic, contour in enumerate(contours):
            area = cv2.contourArea(contour)
            if area > 200:
                x, y, w, h = cv2.boundingRect(contour)
                frame = cv2.drawContours(frame, contours, -1, [0, 255, 0], 3)
                cv2.putText(frame, "Red Color", (x - 3, y - 3), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255))


        low_blue = np.array([94, 80, 2], np.uint8)
        high_blue = np.array([126, 255, 255], np.uint8)
        blue_mask = cv2.inRange(hsv, low_blue, high_blue)
        blue = cv2.erode(blue_mask, kernal)
        blue1 = cv2.bitwise_and(frame, frame, mask=blue_mask)

        contours, ret = cv2.findContours(blue, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        for pic, contour in enumerate(contours):
            area = cv2.contourArea(contour)
            if area > 200:
                x, y, w, h = cv2.boundingRect(contour)
                frame = cv2.drawContours(frame, contours, -1, [0, 255, 0], 3)
                cv2.putText(frame, "Blue Color", (x - 3, y - 3), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0))


        low_green = np.array([65, 60, 60], np.uint8)
        high_green = np.array([80, 255, 255], np.uint8)
        green_mask = cv2.inRange(hsv, low_green, high_green)
        green = cv2.dilate(green_mask, kernal)
Example #43
0
# dicom_names = reader.GetGDCMSeriesFileNames(file)
# reader.SetFileNames(dicom_names)
# reader.MetaDataDictionaryArrayUpdateOn()
# reader.LoadPrivateTagsOn()
# dicom_image = reader.Execute()
# print("Image read!")

brain = segment_brain(1, dicom_image, model)

brain = sitk.GetArrayFromImage(brain)
brain = brain > 0
for i in range(0, brain.shape[0]):
    brain[i, :, :] = binary_fill_holes(brain[i, :, :])

brain = np.array(brain, dtype='int16')
brain = cv2.erode(brain, np.ones((3, 3), np.uint8), iterations=1)

ct_scan = sitk.GetArrayFromImage(dicom_image)

# keep only the slices that contain brain voxels
slices = np.sum(brain, axis=1)
slices = np.sum(slices, axis=1)
slices = slices > 0

ct_scan[brain == 0] = 0
ct_scan = ct_scan[slices, :, :]

ct_scan = sitk.GetImageFromArray(ct_scan)
Example #44
0
def handle_LBP(gray_img,
               sample_coord,
               block_size=(20, 60),
               similar_condi=0.85):
    """
    function:
        handle_LBP(gray_img, sample_coord[, block_size=(20, 60)[, similar_condi=0.85]]):
        計算原始圖像和 sample 的 LBP 相似度

    parameter:
        gray_img: 調整大小後的灰階圖像
        sample_coord: 所有 sample 位於原始圖像的位置(左上角座標)
        block_size: 和 handle_sample 的 block_size 相同
        similar_condi: 相似度的門檻值, 默認 0.85, 範圍 [0, 1), float

    method:
        1. 計算 sample LBP 值以及直方圖
        2. 相似度比較去除最不相似的 sample
        (採取逐一比較的方式, 去除相似度最低的, 其餘的都當成 markers)
        (相似度最低的判斷為 當前相似個數 - 最小相似個數 > 全距 // 2)
        3. 天空全部都標成 marker(最上面一行)
        4. 侵蝕一次

    return:
        markers: 二值化的圖像, 用來當成 watershed 的 markers
    """

    markers = np.zeros(gray_img.shape, np.uint8)
    width, height = block_size

    # list storing the histogram of each sample's LBP values
    hist_list = list()

    for coord in sample_coord:
        y, x = coord
        target = gray_img[y:y + height, x:x + width]

        # 1. compute the LBP values (cast to np.uint8 so that cv2.calcHist can be used)
        lbp_img = lbp(target, 8, 1).astype(np.uint8)
        hist = cv2.calcHist([lbp_img], [0], None, [256], [0, 256])
        hist_list.append(hist)

        # draw the locations of all samples
        cv2.rectangle(gray_img, (x, y), (x + width, y + height),
                      (255, 255, 255), 5)
        # cv2.imshow('sample region', gray_img)

    # 2.
    # list holding the similarity results
    similar_list = list()
    for i in range(0, len(hist_list)):
        cnt = -1  # count similar samples (pairwise comparison; a sample always matches itself, hence -1)
        for j in range(0, len(hist_list)):
            sim = cv2.compareHist(hist_list[i], hist_list[j],
                                  cv2.HISTCMP_CORREL)
            if sim >= similar_condi:
                cnt += 1
        similar_list.append(cnt)

    # mark samples whose similar-count is within range // 2 of the minimum
    for index, coord in enumerate(sample_coord):
        y, x = coord
        if similar_list[index] - min(similar_list) <= (max(similar_list) -
                                                       min(similar_list)) // 2:
            markers[y:y + height, x:x + width] = 255
            cv2.rectangle(gray_img, (x, y), (x + width, y + height), (0, 0, 0),
                          2)

    # 3. add the sky coordinates (mark the topmost band)
    for x in range(0, gray_img.shape[1], width):
        markers[10:10 + height, x:x + width] = 255  # the original slice x + width:x - width is always empty
    cv2.imshow('sample region', gray_img)

    # 4. erode once
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    markers = cv2.erode(markers, kernel, iterations=1)
    # cv2.imshow('markers', markers)
    return markers
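# A hedged usage sketch, not part of the original snippet: feeding the binary
# markers produced by handle_LBP into cv2.watershed, which the docstring names
# as the intended consumer. The connectedComponents labeling step is an
# assumption about how the binary mask becomes integer labels.
import cv2
import numpy as np

def watershed_from_binary_markers(bgr_img, binary_markers):
    # give each connected marker region its own integer label
    _, labels = cv2.connectedComponents(binary_markers)
    labels = labels.astype(np.int32)  # cv2.watershed expects 32-bit integer markers
    cv2.watershed(bgr_img, labels)  # boundary pixels become -1 in labels
    return labels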
Example #45
0
def mainLoop():
    global updatedFlag, continueFlag, thisFrame  #,table,f2

    # Stablization config
    delK_THRESHOLD = 0.7
    MAX_KEEP_CONST = 5
    CACHE_CONST = 5

    # IMG config
    IMG_SPLIT_PRECISION = 40
    FRAME_SIZE_INDEX = 1

    # cam setup

    # Runtime Setup
    k_cache = []
    last_k = 0
    last_b = 0
    keep_count = 0
    idx = 0

    while (1):
        #print('running loop')

        # if idx%10 == 0:
        #     checkAll()
        # if not (checkAll() and updatedFlag and doImgProcess):
        if not (checkAll() and updatedFlag):
            continue

        #print(['updatedFlag',updatedFlag])

        idx = idx + 1
        # get a frame
        frame = thisFrame
        updatedFlag = 0
        height, width = frame.shape[:2]
        frame = cv2.resize(frame, (math.floor(
            FRAME_SIZE_INDEX * width), math.floor(FRAME_SIZE_INDEX * height)))
        height, width = frame.shape[:2]
        cv2.imshow("capture", frame)
        # FIND-INTERSECTIONS & RAW POINTS
        # Read all intersect found
        # Generate all Y Index
        y_indices = [
            x / IMG_SPLIT_PRECISION for x in range(1, IMG_SPLIT_PRECISION)
        ]

        BRIGHTNESS_CONST = 245

        blur = cv2.GaussianBlur(gray(frame), (11, 11), 0)
        _, thresh = cv2.threshold(blur, BRIGHTNESS_CONST, 255,
                                  cv2.THRESH_BINARY)
        eroded = cv2.erode(thresh, None, iterations=5)
        final = eroded

        # pool = Pool(1)
        ##
        # results = []
        # [results.append(pool.apply_async(findForSingleIndex, args=(final,y_index))) for y_index in y_indices]
        ##
        # pool.close()
        # pool.join()
        ##
        # raw_pts=[]
        #        [raw_pts.extend(li) for li in [result.get() for result in results]]

        raw_pts = []
        for y_index in y_indices:
            raw_pts.extend(findForSingleIndex(final, y_index))

        if len(raw_pts) == 0:
            pass
            #print("no any point found, failed")
        else:

            Xraw, Yraw = [pt[0] for pt in raw_pts], [pt[1] for pt in raw_pts]
            X, Y = find_best_pt_group(Xraw, Yraw)
            line_param = find_line_parameters(X, Y)

            if line_param != 0:
                k, b = line_param
                if len(k_cache) == 0 or keep_count > MAX_KEEP_CONST:
                    k_cache = [
                        abs(math.atan(k)) for _ in range(0, CACHE_CONST)
                    ]
                    keep_count = 0
                elif abs(np.mean(k_cache) -
                         math.atan(k)) < delK_THRESHOLD or abs(
                             np.mean(k_cache) + math.atan(k)) < delK_THRESHOLD:
                    k_cache.pop(0)
                    k_cache.append(abs(math.atan(k)))
                    line_p1 = (0, math.floor(b))
                    line_p2 = (1000, math.floor(1000 * k + b))
                    cv2.line(frame, line_p1, line_p2, (255, 0, 0), 2)

                    last_k = k
                    last_b = b
                else:
                    #print('break threshold')
                    keep_count = keep_count + 1
                    k, b = last_k, last_b
                    line_p1 = (0, math.floor(b))
                    line_p2 = (1000, math.floor(1000 * k + b))
                    cv2.line(frame, line_p1, line_p2, (255, 0, 0), 2)

                print(k, b, height, width, idx)
                ctrlParamList = paramProcess(k, b, height, width, idx)

                displayUpdate(displayPins['IMGPROCESS'], mode='blink')
                for param in ctrlParamList:
                    # f2.read()
                    # f2.write("{0} ".format(param[1]))

                    print("{0} {1}".format(param[0], param[1]))
                    try:
                        table.putNumber(param[0], param[1])

                    except:
                        pass
                        #print('no NetworkTable')

                # f2.read()
                # f2.write("\n")

            cv2.imshow("capture", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):

            break
        # while(1):
        #     if cv2.waitKey(1) & 0xFF == ord('w'):
        #         break

    continueFlag = 0
    frameThread.join(timeout=1)
    # networkTableSetupThread.join(timeout = 1)
    # networkReconnectThread.join(timeout=5)
    cap.release()
    cv2.destroyAllWindows()
    resetPins()
    gp.cleanup()
    return 1
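# A hedged sketch of the find_line_parameters helper used in mainLoop (it is
# not defined in this snippet); assumes a plain least-squares fit and returns
# 0 when a line cannot be fitted, matching the caller's `line_param != 0` check.
import numpy as np

def find_line_parameters(X, Y):
    if len(X) < 2:
        return 0
    k, b = np.polyfit(X, Y, 1)  # slope and intercept of y = k*x + b
    return k, b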
Example #46
0
import cv2 as cv
import numpy as np
from ImageUtils import MultiplePlot
from ImageUtils.utils import threshold

drawer = MultiplePlot([10, 10], [1, 2])

original = cv.imread('rise.jpg', 0)

thresholded = threshold(original, 115)

opened = cv.morphologyEx(thresholded, cv.MORPH_OPEN, np.ones((7, 7)))
erosion = cv.erode(opened, np.ones((5, 5)), iterations=1)
res = opened - erosion
res[res == 1] = 200

drawer.add(original, "Boundary")

drawer.add(res, "After")

cv.imwrite("rise_hollow.jpg", res)

drawer.show()
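# (hedged aside) opened - eroded above is the inner boundary of the blobs;
# OpenCV's built-in cv.MORPH_GRADIENT gives the two-sided variant
# (dilation minus erosion):
# boundary2 = cv.morphologyEx(opened, cv.MORPH_GRADIENT, np.ones((5, 5)))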
Example #47
0
    def load_trav_map(self):
        self.floor_map = []
        self.floor_graph = []
        for f in range(len(self.floors)):
            trav_map = np.array(
                Image.open(
                    os.path.join(get_model_path(self.model_id),
                                 'floor_trav_{}.png'.format(f))))
            obstacle_map = np.array(
                Image.open(
                    os.path.join(get_model_path(self.model_id),
                                 'floor_{}.png'.format(f))))
            if self.trav_map_original_size is None:
                height, width = trav_map.shape
                assert height == width, 'trav map is not a square'
                self.trav_map_original_size = height
                self.trav_map_size = int(self.trav_map_original_size *
                                         self.trav_map_default_resolution /
                                         self.trav_map_resolution)
            trav_map[obstacle_map == 0] = 0
            trav_map = cv2.resize(trav_map,
                                  (self.trav_map_size, self.trav_map_size))
            trav_map = cv2.erode(
                trav_map,
                np.ones((self.trav_map_erosion, self.trav_map_erosion)))
            trav_map[trav_map < 255] = 0

            if self.build_graph:
                graph_file = os.path.join(get_model_path(self.model_id),
                                          'floor_trav_{}.p'.format(f))
                if os.path.isfile(graph_file):
                    print("load traversable graph")
                    with open(graph_file, 'rb') as pfile:
                        g = pickle.load(pfile)
                else:
                    print("build traversable graph")
                    g = nx.Graph()
                    for i in range(self.trav_map_size):
                        for j in range(self.trav_map_size):
                            if trav_map[i, j] > 0:
                                g.add_node((i, j))
                                # 8-connected graph: only these four of the
                                # eight neighbor directions are needed, since
                                # edges are undirected and each edge is then
                                # added exactly once
                                neighbors = [(i - 1, j - 1), (i, j - 1),
                                             (i + 1, j - 1), (i - 1, j)]
                                for n in neighbors:
                                    if 0 <= n[0] < self.trav_map_size and 0 <= n[
                                            1] < self.trav_map_size and trav_map[
                                                n[0], n[1]] > 0:
                                        g.add_edge(n, (i, j),
                                                   weight=l2_distance(
                                                       n, (i, j)))

                    # only take the largest connected component
                    largest_cc = max(nx.connected_components(g), key=len)
                    g = g.subgraph(largest_cc).copy()
                    with open(graph_file, 'wb') as pfile:
                        pickle.dump(g, pfile, protocol=pickle.HIGHEST_PROTOCOL)

                self.floor_graph.append(g)
                # update trav_map accordingly
                trav_map[:, :] = 0
                for node in g.nodes:
                    trav_map[node[0], node[1]] = 255

            self.floor_map.append(trav_map)
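        # (hedged usage sketch) the per-floor graphs built above support
        # shortest-path queries, e.g. networkx A* with the same l2_distance:
        # g = self.floor_graph[floor]
        # path = nx.astar_path(g, (10, 10), (50, 80), heuristic=l2_distance)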
Example #48
0
import numpy as np
import argparse
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path to the image")
ap.add_argument('-w', "--width", type=float, required=True, help="width")
args = vars(ap.parse_args())

image = cv2.imread(args['image'])
resize = Helpers.resize(image, width=800)
gray = cv2.cvtColor(resize, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (3, 3), 0)
edged = cv2.Canny(gray, 0, 70)
edged = cv2.dilate(edged, None, iterations=1)
edged = cv2.erode(edged, None, iterations=1)

kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
closed = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, kernel)

cnts = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = Helpers.grab_contours(cnts)
(cnts, _) = Helpers.sort_contours(cnts)
refObj = None


def draw_circle(xy, r, color):
    cv2.circle(copy, (int(xy[0]), int(xy[1])), r, color, -1)

Example #49
0
    gradient1 = cv2.subtract(gradX1, gradY1)
    gradient1 = cv2.convertScaleAbs(gradient1)
    
    gradX2 = cv2.Sobel(blurred2, ddepth=cv2.CV_32F, dx=1, dy=0)
    gradY2 = cv2.Sobel(blurred2, ddepth=cv2.CV_32F, dx=0, dy=1)

    gradient2 = cv2.subtract(gradX2, gradY2)
    gradient2 = cv2.convertScaleAbs(gradient2)
    
    blurred = cv2.GaussianBlur(gradient1, (9, 9),0)
    (_, thresh) = cv2.threshold(blurred, 225, 0, cv2.THRESH_TOZERO_INV)
    (_, thresh) = cv2.threshold(thresh, 30, 0, cv2.THRESH_TOZERO)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (25, 25))
    closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
    closed = cv2.erode(closed, None, iterations=4)
    closed = cv2.dilate(closed, None, iterations=4)
    (_, cnts, _) = cv2.findContours(closed.copy(), 
        cv2.RETR_LIST, 
        cv2.CHAIN_APPROX_SIMPLE)
    c = sorted(cnts, key=cv2.contourArea, reverse=True)[0]

    rect = cv2.minAreaRect(c)

    box = np.int0(cv2.boxPoints(rect))

    #dist1 = np.linalg.norm(box[0] - box[-1])
    #print (dist1)
    #if dist1 <100:
    draw_img1 = cv2.drawContours(img1.copy(), [box], -1, (255, 0, 0), 15)
    #else:
Example #50
0
    def on_any_event(event):
        if event.is_directory:
            return None
        elif event.event_type == 'created':

            shutil.rmtree('/home/diego/Desktop/Output')
            os.mkdir('/home/diego/Desktop/Output')

            shutil.rmtree('/home/diego/MPFI/gold_particles/PIX2PIX/datasets/Output_Appended/test')
            os.mkdir('/home/diego/MPFI/gold_particles/PIX2PIX/datasets/Output_Appended/test')

            shutil.rmtree('/home/diego/MPFI/gold_particles/PIX2PIX/results/Oct30pix2pix/test_latest/images')
            os.mkdir('/home/diego/MPFI/gold_particles/PIX2PIX/results/Oct30pix2pix/test_latest/images')

            shutil.rmtree('/home/diego/Desktop/Output_ToStich')
            os.mkdir('/home/diego/Desktop/Output_ToStich')

            ## look in INPUT folder, crop photo and save crop to OUTPUT folder
            def load_data_make_jpeg(folder):
                list = glob.glob(folder)
                for entry in list:
                    img_size = (256, 256, 3)
                    img_new = io.imread(entry)
                    img_new = (img_new / 256).astype('uint8')
                    shape = img_new.shape
                    height = shape[0] // 256
                    height256 = height * 256
                    width = shape[1] // 256
                    width256 = width * 256

                    img_new = img_new[:height256, :width256, :3]
                    img_new_w = view_as_blocks(img_new, img_size)
                    img_new_w = np.uint8(img_new_w)
                    imageio.imwrite('/home/diego/Desktop/Output_Final/' + 'CroppedVersion' + '.png', img_new)
                    r = 0
                    for i in range(img_new_w.shape[0]):
                        for j in range(img_new_w.shape[1]):
                            A = np.zeros((img_size[0], img_size[1], 3))
                            A[:, :, :] = img_new_w[i, j, :, :]
                            A = np.uint8(A)
                            imageio.imwrite('/home/diego/Desktop/Output/' + str(r) + '.png', A)
                            r += 1
                return width, height

            ## Cut up in order, append white images
            width, height = load_data_make_jpeg('/home/diego/Desktop/Input/*.*')

            def combine_white(folderA):
                os.chdir(folderA)
                for file in os.listdir(folderA):
                    imA = io.imread(file)
                    newimage = np.concatenate((imA, white), axis=1)
                    imageio.imwrite('/home/diego/MPFI/gold_particles/PIX2PIX/datasets/Output_Appended/test/' + file,
                                    newimage)

            white = io.imread('/home/diego/Desktop/White/white.png')

            combine_white('/home/diego/Desktop/Output/')

            ## Save that dataset to PIX2PIX/datasets/___

            ## Run PIX2PIX network
            os.system(
                'python3 /home/diego/MPFI/gold_particles/PIX2PIX/test.py --dataroot /home/diego/MPFI/gold_particles/PIX2PIX/datasets/Output_Appended/ --name Oct30pix2pix --model pix2pix --direction AtoB --num_test 1000000 --checkpoints_dir /home/diego/MPFI/gold_particles/PIX2PIX/checkpoints/ --results_dir /home/diego/MPFI/gold_particles/PIX2PIX/results/')
            ## Take only the Fake_B photos and stich together
            list = glob.glob(
                '/home/diego/MPFI/gold_particles/PIX2PIX/results/Oct30pix2pix/test_latest/images/*_fake_B.png')
            ## Save to OUTPUT folder
            for entry in list:
                split_name = entry.split('/')
                dirA = '/home/diego/MPFI/gold_particles/PIX2PIX/results/Oct30pix2pix/test_latest/images/'
                pathA = os.path.join(dirA, split_name[10])
                dirB = '/home/diego/Desktop/Output_ToStich/'
                pathB = os.path.join(dirB, split_name[10])
                shutil.move(pathA, pathB)

            ## STICH TOGETHER

            widthdiv256 = width
            heighttimeswidth = width * height

            folderstart = '/home/diego/Desktop/Output_ToStich/'

            def stitch_row(n):
                file1 = np.array(Image.open(folderstart + master[n]))
                file2 = np.array(Image.open(folderstart + master[n + 1]))
                full_row = np.concatenate((file1, file2), axis=1)
                for i in range(n + 2, n + widthdiv256):
                    file_next = np.array(Image.open(folderstart + master[i]))
                    full_row = np.concatenate((full_row, file_next), axis=1)
                return full_row

            files = os.listdir(folderstart)
            list = []
            for file in files:
                split_name = re.split(r'\D', file)

                list.append(split_name[0])

            list.sort(key=float)
            master = []
            for file in list:
                name = file + '_fake_B.png'
                master.append(name)

            picture = stitch_row(0)
            for n in range(widthdiv256, heighttimeswidth, widthdiv256):
                next_row = stitch_row(n)
                picture = np.concatenate((picture, next_row), axis=0)

            imageio.imwrite('/home/diego/Desktop/Output_Final/OutputStitched.png', picture)

            ## Count All Green Dots
            img = cv2.imread('/home/diego/Desktop/Output_Final/OutputStitched.png')

            lower_green = np.array([0, 245, 0])
            upper_green = np.array([40, 255, 40])

            mask = cv2.inRange(img, lower_green, upper_green)
            kernel = np.ones((5, 5), np.uint8)
            e = cv2.erode(mask, kernel, iterations=1)
            d = cv2.dilate(e, kernel, iterations=1)

            cnts = cv2.findContours(d, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)
            results = pd.DataFrame(columns=['X', 'Y'])

            for c in cnts:
                M = cv2.moments(c)
                if M["m00"] != 0:
                    cX = int(M["m10"] / M["m00"])
                    cY = int(M["m01"] / M["m00"])
                else:
                    M["m00"] = 1
                    cX = int(M["m10"] / M["m00"])
                    cY = int(M["m01"] / M["m00"])

                if (cX != 0 or cY != 0):
                    results = results.append({'X': cX, 'Y': cY}, ignore_index=True)
                    # cv2.circle(newlabeled, (cX, cY), 2,(255,255,255), -1)
                cv2.putText(d, "center", (cX - 4, cY - 4), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

            export_csv = results.to_csv(r'/home/diego/Desktop/Output_Final/Results.csv', index=None, header=True)

            shutil.rmtree('/home/diego/Desktop/Input')
            os.mkdir('/home/diego/Desktop/Input')


        elif event.event_type == 'modified':
Example #51
0
    def run(self):
        global cycle,ret,frame,delay
        global start
        global player
        sum=0
        cnt_sum=0

        self.cap=cv2.VideoCapture(-1)
        cv2.startWindowThread()
        cv2.namedWindow("camera")
        while self._running:
            ret,frame=self.cap.read()
            cv2.imshow("camera",frame)

            if start==2:
                bgModel = cv2.createBackgroundSubtractorMOG2(200, bgSubThreshold)
                img_removebg = self.removeBG(bgModel,frame)
                img = img_removebg



                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)


                # convert it to the HSV color space,
                # and determine the HSV pixel intensities that fall into
                # the specified upper and lower boundaries
                hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
                #cv2.imshow("hsv",hsv)

                skinMask = cv2.inRange(hsv, skin_lower, skin_upper)
                skinMask = cv2.erode(skinMask, None, iterations=1)
                skinMask = cv2.dilate(skinMask, None, iterations=1)

                #cv2.imshow("skinMask",skinMask)



                blur = cv2.GaussianBlur(skinMask, (blurValue, blurValue), 0)

                #cv2.imshow('blur', blur)
                ret, thresh = cv2.threshold(skinMask, threshold, 255, cv2.THRESH_BINARY)
                #cv2.imshow('ori', thresh)


                #cv2.imshow('ori_right', thresh_right)

                # get the coutours
                thresh1 = copy.deepcopy(thresh)
                _,contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)



                length = len(contours)
                maxArea = -1



                if length > 0:
                    res = max(contours, key=cv2.contourArea)
                    cnt = self.calculateFingertip(res)
                    sum+=cnt
                    cnt_sum+=1
                    if cnt_sum%10==0:
                        finger=sum/10
                        print("finger = "+str(finger))

                        if finger==0:
                            player=-1
                        elif finger==1:
                            player=0
                        elif finger<=3:
                            player=2
                        else :
                            player=1
                        sum=0
                        cnt_sum=0

            else:
                sum=0
                cnt_sum=0

            time.sleep(delay)
Example #52
0
def prepareImage(c, rp) :
    (name, cit, scale, offset, thresh, eit, dit, amp, frac) = rp
    mono = amplify(amp, c, fraction=frac)
    (ret,cimg) = cv2.threshold(mono, thresh, 255, cv2.THRESH_BINARY)
    return cv2.erode(cimg, np.ones((3, 1), np.uint8), iterations=3)  # the bare 3 previously landed on the dst argument
Example #53
0
def img_process(img_seg):
    #w = seg_ind.shape[0]
    #h = seg_ind.shape[1]
    img_seg_copy = np.zeros(img_seg.shape, np.uint8)
    img_seg_copy = img_seg.copy()
    for i in range(img_seg.shape[0]):
        for j in range(img_seg.shape[1]):
            '''
            if(seg_ind[i,j] == 4.0):
                img_seg_copy[j,i,:] = 255
            else:
                img_seg_copy[j,i,:]= 0
            '''
            if ((img_seg[i, j, :] == np.array([128, 64, 128])).all()):
                img_seg_copy[i, j, :] = np.array([255, 255, 255])
            else:
                img_seg_copy[i, j, :] = np.array([0, 0, 0])

    cv2.imshow("road_area", img_seg_copy)
    cv2.waitKey(1000)
    size = 7

    kernel = np.ones((size, size), dtype=np.uint8)
    img_close = cv2.erode(cv2.dilate(img_seg_copy, kernel), kernel)
    img_close = cv2.erode(cv2.dilate(img_close, kernel), kernel)
    img_close = cv2.erode(cv2.dilate(img_close, kernel), kernel)
    img_close = cv2.erode(cv2.dilate(img_close, kernel), kernel)
    img_close = cv2.erode(cv2.dilate(img_close, kernel), kernel)
    img_close = cv2.erode(cv2.dilate(img_close, kernel), kernel)
    img_close = cv2.erode(cv2.dilate(img_close, kernel), kernel)
    img_open = cv2.dilate(cv2.erode(img_close, kernel), kernel)
    img_open = cv2.dilate(cv2.erode(img_open, kernel), kernel)
    img_open = cv2.dilate(cv2.erode(img_open, kernel), kernel)
    img_open = cv2.dilate(cv2.erode(img_open, kernel), kernel)
    img_open = cv2.dilate(cv2.erode(img_open, kernel), kernel)
    img_open = cv2.dilate(cv2.erode(img_open, kernel), kernel)
    img_open = cv2.dilate(cv2.erode(img_open, kernel), kernel)
    cv2.imshow("img_seg_process", img_open)
    cv2.waitKey(1000)
    return img_open
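# (hedged aside) the per-pixel double loop in img_process can be vectorized,
# assuming img_seg is a BGR uint8 array:
# road = (img_seg == np.array([128, 64, 128])).all(axis=2)
# img_seg_copy = np.zeros_like(img_seg)
# img_seg_copy[road] = 255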
Example #54
0
    tracker = Tracker()

    # BLUE LINE

    # mask for the blue line
    maska_za_plavu = cv2.inRange(frame, np.array([180, 0, 0]),
                                 np.array([255, 50, 50]))
    frame_plava = cv2.bitwise_and(frame_plava,
                                  frame_plava,
                                  mask=maska_za_plavu)
    #cv2.imshow('Plava linija', frame_plava)

    # 76%
    erosion_plava = cv2.erode(frame_plava,
                              np.ones((3, 3), np.uint8),
                              iterations=1)
    dilation_plava = cv2.dilate(erosion_plava, np.ones((3, 3)), iterations=1)
    gray_plava = cv2.cvtColor(dilation_plava, cv2.COLOR_BGR2GRAY)

    # 75%
    # gray_plava = cv2.cvtColor(frame_plava, cv2.COLOR_BGR2GRAY)

    # 66%
    # ret, thresh_plava = cv2.threshold(gray_plava, 0, 255, cv2.THRESH_BINARY)
    # erosion_plava = cv2.erode(thresh_plava, np.ones((3, 3), np.uint8), iterations=1)

    edges_plava = cv2.Canny(gray_plava, 50, 150, apertureSize=3)
    # gblur_plava = cv2.GaussianBlur(edges_plava, (3, 3), 1)
    # cv2.imshow('Plava linija', gblur_plava)
    plava_linija = cv2.HoughLinesP(edges_plava,
Example #55
0
import cv2
import numpy as np
##resize image
## (5, 5) is the kernel size
kernel = np.ones((5, 5), np.uint8)
img = cv2.imread("Picture/bola.jpg")
imgresize = cv2.resize(img, (100, 100))
print(img.shape)
# the printed result is (height, width, rgb), which is useful for inspecting the image shape
# img cropped
# 50:600 is the height range to crop, followed by the width range to crop.
imgcropped = img[50:600, 300:900]

imggreycroped = cv2.cvtColor(imgcropped, cv2.COLOR_BGR2GRAY)
imgcropedbw = cv2.Canny(imggreycroped, 100, 100)
imgdilatebw = cv2.dilate(imgcropedbw, kernel, iterations=1)
imgerodedbw = cv2.erode(imgdilatebw, kernel, iterations=1)

##--- cropped image result ---##
cv2.imshow("img cropped", imgcropped)
##---- original image ----##
cv2.imshow("img resize", img)
##--- image extraction ---##
cv2.imshow("img erode", imgerodedbw)
cv2.waitKey(0)
Example #56
0
def boundingbox():
    X = []
    for im in image_list():
        img = cv2.fastNlMeansDenoisingColored(im, None, 10, 10, 7, 21)  # remove noise
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        kernel = np.ones((3, 3), np.uint8)
        dila = cv2.dilate(gray, kernel, iterations=2)
        erod = cv2.erode(dila, kernel, iterations=2)
        blur = cv2.GaussianBlur(erod, (5, 5), 0)
        thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY_INV, 11, 2)

        contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        rect = []
        rects = []
        for cnt in contours:
            x, y, w, h = cv2.boundingRect(cnt)
            xw = x + w
            yh = y + h
            rect.append([x, y, w, h, xw, yh])
        x = rect[0][0]
        y = rect[0][1]
        xw = rect[0][4]
        yh = rect[0][5]
        for i in range(1, len(rect)):
            if x > rect[i][0]:
                x = rect[i][0]
            if y > rect[i][1]:
                y = rect[i][1]
            if xw < rect[i][4]:
                xw = rect[i][4]
            if yh < rect[i][5]:
                yh = rect[i][5]
            w = xw - x
            h = yh - y
        rects.append((x, y, w, h, xw, yh))
        for r in rects:
            x, y, w, h, xw, yh = r
        num = gray[y:yh, x:xw]
        num = 255 - num
        ww = round((w if w > h else h) * 1.85)
        spc = np.zeros((ww, ww))
        wy = (ww - h) // 2
        wx = (ww - w) // 2
        spc[wy:wy+h, wx:wx+w] = num
        num = cv2.resize(spc, (128, 128))
        #cv2.imwrite(str(con)+"-num.PNG", num)
        X.append(num)
        #print(y, yh, yh-y, x, xw, xw-x)
        #print(wy, wy+h, h, wx, wx+w, w)
        #cv2.waitKey(0)
        #red = (0, 0, 255)
        #cv2.rectangle(img, (x, y), (xw, yh), red, 2)
        #cv2.imshow("img", img)
        #print(x, y, xw, yh)
        #red = (0, 0, 255)
        #cv2.rectangle(im, (x, y), (xw, yh), red, 2)
        #cv2.imshow("im", im)
        #cv2.waitKey(0)

    return X
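## Aside (added; not from the source): the running min/max loop in boundingbox()
## just computes the union of all bounding boxes, which NumPy expresses directly.
## A hypothetical equivalent, given the same list of (x, y, w, h, xw, yh) rows:
import numpy as np
def union_box(rect):
    r = np.asarray(rect)
    x, y = int(r[:, 0].min()), int(r[:, 1].min())
    xw, yh = int(r[:, 4].max()), int(r[:, 5].max())
    return x, y, xw - x, yh - y, xw, yh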
import cv2
import numpy as np
img = cv2.imread("images/image.jpg")
ker = np.ones((5, 5), np.uint8)
new_img = cv2.erode(img, ker, iterations=1)
# cv2.imwrite("erosion.jpg", new_img)
cv2.imshow("erosion", new_img)

cv2.waitKey(0)
cv2.destroyAllWindows()
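## Aside (added; not from the source): erosion replaces each pixel with the
## minimum over the kernel footprint, so white regions shrink by roughly the
## kernel radius. A tiny numeric check on a synthetic binary image:
import cv2
import numpy as np
square = np.zeros((7, 7), np.uint8)
square[2:5, 2:5] = 255                       # 3x3 white block
eroded = cv2.erode(square, np.ones((3, 3), np.uint8), iterations=1)
print(eroded[3, 3], eroded[2, 2])            # 255 0 -- only the center survives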
Example #58
0
def candidate_process(img, densitys, rects, fg_mask):
    global bias
    scann_width = 5
    rows, cols = img.shape
    min_val = img.min()
    new_rects = list()
    for idx, rect in enumerate(rects):
        print('\rcandidate: {}/{}'.format(idx, len(rects)), end='')
        x, y, w, h = rect
        if w > h:
            x0 = max(0, x - w // 2)
            y0 = max(0, y - w // 2)
            x1 = min(cols, x0 + w * 2)
            y1 = min(rows, y0 + w * 2)
        else:
            x0 = max(0, x - h // 2)
            y0 = max(0, y - h // 2)
            x1 = min(cols, x0 + h * 2)
            y1 = min(rows, y0 + h * 2)
        '''    
        x0 = max(0, x-w)
        y0 = max(0, y-h)
        x1 = min(cols, x0+w*3)
        y1 = min(rows, y0+h*3)
        '''
        sub_img = img[y0:y1, x0:x1]
        sub_fg_mask = fg_mask[y0:y1, x0:x1]
        res = np.zeros(sub_img.shape, dtype=np.uint8)
        average = _calculate_average(sub_img, min_val, modality='mode')
        average -= bias
        averages = np.ones(sub_img.shape) * average
        density = int(np.median(densitys[y0:y1, x0:x1]))

        avg_densitys = np.ones(sub_img.shape) * density
        sub_lines_h, sub_lines_v, _, _ = find_lines(sub_img,
                                                    averages,
                                                    avg_densitys,
                                                    scann_width=scann_width,
                                                    printf=False,
                                                    save_edge=True)

        scann_line_h = np.zeros(sub_img.shape, dtype=np.uint8)
        scann_line_v = scann_line_h.copy()

        for s, e, i in sub_lines_h:
            #if (e - s) * 0.02 < 0.3:  # filter out scan lines shorter than 0.3 m
            #    continue
            if (e - s) > 0.8 * (x1 - x0):
                continue
            scann_line_h[i, s:e + scann_width] = 255

        for s, e, i in sub_lines_v:
            #if (e - s) * 0.02 < 0.1:  # filter out scan lines below a minimum length
            #    continue
            if (e - s) > 0.8 * (y1 - y0):
                continue
            scann_line_v[s:e + scann_width, i] = 255

        # scann_line_h is already single-channel; the source's GRAY2BGR/BGR2GRAY
        # round trip was a no-op, so threshold it directly
        #cv2.imwrite("56_scann_lines.png", bi_img)
        ret, binary = cv2.threshold(scann_line_h, 0, 255, cv2.THRESH_BINARY)

        verticalStructure = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 5))
        vertical = cv2.erode(binary, verticalStructure)
        vertical = cv2.dilate(vertical, verticalStructure)

        # close holes to make it solid rectangle
        kernel = np.ones((5, 5), np.uint8)
        close_h = cv2.morphologyEx(vertical, cv2.MORPH_CLOSE, kernel)

        # likewise, scann_line_v is already grayscale
        ret, binary = cv2.threshold(scann_line_v, 0, 255, cv2.THRESH_BINARY)

        hStructure = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 1))
        tmp = cv2.erode(binary, hStructure)
        tmp = cv2.dilate(tmp, hStructure)

        # close holes to make it solid rectangle
        kernel = np.ones((5, 5), np.uint8)
        close_v = cv2.morphologyEx(tmp, cv2.MORPH_CLOSE, kernel)

        close = cv2.bitwise_and(close_v, close_h)
        close = cv2.bitwise_and(close, sub_fg_mask)

        # OpenCV 3.x signature: findContours returns (image, contours, hierarchy)
        _, contours, hierarchy = cv2.findContours(close, cv2.RETR_LIST,
                                                  cv2.CHAIN_APPROX_NONE)

        for c in contours:
            #x, y, w, h = cv2.boundingRect(c)
            xn, yn, wn, hn = cv2.boundingRect(c)
            xn += x0
            yn += y0 + density - 1
            new_rects.append(np.array([[xn, yn, xn + wn, yn + hn]]))
    return np.concatenate(new_rects, axis=0) if len(new_rects) > 0 else None
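## Aside (added; not from the source): the (1, 5) and (5, 1) structuring
## elements above are the standard morphological trick for keeping only
## vertical or horizontal runs. A minimal self-contained sketch:
import cv2
import numpy as np
grid = np.zeros((20, 20), np.uint8)
grid[:, 10] = 255                            # one vertical line
grid[10, :] = 255                            # one horizontal line
v_se = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 5))  # 1 wide, 5 tall
vertical_only = cv2.dilate(cv2.erode(grid, v_se), v_se)
print(vertical_only[5, 10], vertical_only[10, 5])  # 255 0 -- horizontal line removed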
Example #59
0
def gesturenumbers():

    cap = cv2.VideoCapture(0)
    gameDisplay1 = pygame.display.set_mode((1, 1))
    while True:

        ret, img = cap.read()
        img = cv2.flip(img, 1)
        img = cv2.resize(img, (800, 600))
        cv2.rectangle(img, (350, 350), (100, 100), (0, 255, 0), 0)
        crop_img = img[100:350, 100:350]
        grey = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
        value = (5, 55)
        kernel = np.ones((3, 3), np.uint8)
        eroded = cv2.erode(grey, kernel, iterations=2)  # misnamed 'dilated' in the source
        blurred = cv2.GaussianBlur(grey, value, 0)
        blurred1 = cv2.GaussianBlur(eroded, value, 0)   # computed but never used below
        # the source also computed an identical, unused `thresh2` from `blurred`
        _, thresh1 = cv2.threshold(blurred, 127, 255,
                                   cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)


        _,contours, hierarchy = cv2.findContours(thresh1.copy(),cv2.RETR_TREE, \
                cv2.CHAIN_APPROX_NONE)
        max_area = -1
        ci = 0  # ensure ci is defined even if no contour is found
        for i in range(len(contours)):
            cnt = contours[i]
            area = cv2.contourArea(cnt)
            if (area > max_area):
                max_area = area
                ci = i
        cnt = contours[ci]
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(crop_img, (x, y), (x + w, y + h), (0, 0, 255), 0)
        hull = cv2.convexHull(cnt)
        drawing = np.zeros(crop_img.shape, np.uint8)
        cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 1)
        cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 2)
        hull1 = cv2.convexHull(cnt)
        areahull = cv2.contourArea(hull1)
        areacnt = cv2.contourArea(cnt)
        hull = cv2.convexHull(cnt, returnPoints=False)
        defects = cv2.convexityDefects(cnt, hull)
        count_defects = 0
        #find the percentage of area not covered by hand in convex hull
        arearatio = ((areahull - areacnt) / areacnt) * 100
        cv2.drawContours(thresh1, contours, -1, (0, 255, 0), 3)
        for i in range(defects.shape[0]):
            s, e, f, d = defects[i, 0]
            start = tuple(cnt[s][0])
            end = tuple(cnt[e][0])
            far = tuple(cnt[f][0])
            # side lengths of the triangle (start, end, far)
            a = math.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)
            b = math.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)
            c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)
            # law of cosines; * 57 approximates 180/pi (radians to degrees)
            angle = math.acos((b**2 + c**2 - a**2) / (2 * b * c)) * 57
            # Heron's formula gives the area; d = 2*area/a is the distance
            # from the defect point to the start-end chord
            s = (a + b + c) / 2
            ar = math.sqrt(s * (s - a) * (s - b) * (s - c))
            d = (2 * ar) / a
            # a deep, acute defect is counted as a gap between fingers
            if angle <= 90 and d > 30:
                count_defects += 1
                cv2.circle(crop_img, far, 1, [0, 0, 255], -1)
            #dist = cv2.pointPolygonTest(cnt,far,True)
            cv2.line(crop_img, start, end, [0, 255, 0], 2)
            #cv2.circle(crop_img,far,5,[0,0,255],-1)
        if count_defects < 1:
            if areacnt < 2000:
                cv2.putText(img, 'Put hand in the box', (0, 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
            else:
                if arearatio < 15:
                    cv2.putText(img, '0', (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 2,
                                2)

                # elif arearatio <25:
                #     cv2.putText(img,'All THE BEST',(0,50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
                else:
                    cv2.putText(img, '1', (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 2,
                                2)

        elif count_defects == 1:
            if 30 < arearatio < 40:
                # print(arearatio)
                cv2.putText(img, "2", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
                # gesturewords()

            else:
                # print(arearatio)
                cv2.putText(img, "2", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
        elif count_defects == 2:
            cv2.putText(img, "3", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
        elif count_defects == 3:
            cv2.putText(img, "4", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
        elif count_defects == 4:
            cv2.putText(img, "5", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
        elif count_defects == 5:
            cv2.putText(img, "6", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
            gesturewords()
            break
        elif count_defects == 6:
            cv2.putText(img, "7", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
        elif count_defects == 7:
            cv2.putText(img, "8", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
        elif count_defects == 8:
            cv2.putText(img, "9", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
        elif count_defects == 9:
            cv2.putText(img, "10", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
        else:
            cv2.putText(img, "more", (50, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
        #cv2.imshow('drawing', drawing)
        #cv2.imshow('end', crop_img)
        # cv2.imshow('Gesture', img)

        lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)

        #-----Splitting the LAB image to different channels-------------------------
        l, a, b = cv2.split(lab)

        #-----Applying CLAHE to L-channel-------------------------------------------
        clahe = cv2.createCLAHE(clipLimit=6.0, tileGridSize=(10, 10))
        cl = clahe.apply(l)

        #-----Merge the CLAHE enhanced L-channel with the a and b channel-----------
        limg = cv2.merge((cl, a, b))

        #-----Converting image from LAB color model back to BGR---------------------
        img = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
        cv2.imshow('Gesture', img)

        all_img = np.hstack((drawing, crop_img))
        # cv2.imshow('Contours', all_img)

        ch = cv2.waitKey(1)
        if ch & 0xFF == ord('q'):
            break
    gameDisplay1 = pygame.display.set_mode((width, height))  # width/height: globals defined elsewhere in the source
    cap.release()
    cv2.destroyAllWindows()
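## Aside (added; not from the source): the CLAHE-on-L-channel step above boosts
## local contrast without shifting hue, because only lightness is equalized.
## A minimal sketch on a synthetic low-contrast image:
import cv2
import numpy as np
bgr = np.full((64, 64, 3), 120, np.uint8)
bgr[16:48, 16:48] = 136                      # faint square on a flat background
l, a, b = cv2.split(cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB))
cl = cv2.createCLAHE(clipLimit=6.0, tileGridSize=(10, 10)).apply(l)
enhanced = cv2.cvtColor(cv2.merge((cl, a, b)), cv2.COLOR_LAB2BGR)
print(l.min(), l.max(), "->", cl.min(), cl.max())  # compare L range before/after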
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np

# read the image
source = cv.imread('demo.png', cv.IMREAD_GRAYSCALE)

# set up the convolution kernel
kernel = np.ones((5, 5), np.uint8)

# image erosion
erode_img = cv.erode(source, kernel)

# image dilation
dilate_result = cv.dilate(source, kernel)

# display the results
titles = ['Source Img', 'Erode Img', 'Dilate Img']
images = [source, erode_img, dilate_result]

# plot with matplotlib
for i in range(3):
    plt.subplot(1, 3, i + 1), plt.imshow(images[i], 'gray')
    plt.title(titles[i])
    plt.xticks([]), plt.yticks([])

plt.show()
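## Aside (added; not from the source): erosion and dilation are duals under
## inversion -- eroding the foreground is the same as dilating the background.
## A quick self-contained check of that identity:
import cv2
import numpy as np
img = np.zeros((32, 32), np.uint8)
img[8:24, 8:24] = 255
k = np.ones((5, 5), np.uint8)
lhs = cv2.erode(img, k)
rhs = cv2.bitwise_not(cv2.dilate(cv2.bitwise_not(img), k))
print((lhs == rhs).all())  # expected: True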