def overlayImages(self, img1, img2):

        img2 = img2[0:img1.shape[0],0:img1.shape[1],:]

        rows,cols,channels = img2.shape
        roi = img1[0:rows, 0:cols ]
        #cv2.imshow('roi', roi)
        #cv2.waitKey(5)

        # Now create a mask of logo and create its inverse mask also
        img2gray = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
        ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
        mask = mask[0:roi.shape[0], 0:roi.shape[1]]
        mask_inv = cv2.bitwise_not(mask)
        # Now black-out the area of logo in ROI
        img1_bg = cv2.bitwise_and(roi,roi,mask = mask_inv)
        
        # Take only region of logo from logo image.
        img2_fg = cv2.bitwise_and(img2,img2,mask = mask)
        
        # Put logo in ROI and modify the main image
        dst = cv2.add(img1_bg,img2_fg)
        img1[0:rows, 0:cols ] = dst
        return img1

        """
        def get_camerainfo(self,msg):
        this gets the camera info and save it in the init variable.
        
        """

        """
def thresholdColor(img, colattr):
    (domchan, dommin, first, second) = colattr
    channels = cv2.split(img)#red, green, blue
    width, height, cha = img.shape
    mult = np.empty((width,height)).astype(np.uint8)
    mult.fill(255)
    red = channels[2].astype(np.uint8)
    green = channels[1].astype(np.uint8)
    blue = channels[0].astype(np.uint8)
    firsttype = np.zeros(img.shape)
    secondtype = np.zeros(img.shape)

    if domchan == "r":
        zerotype = (red > dommin)
        firsttype = np.true_divide(red,green)#r/g
        secondtype = np.true_divide(red,blue)#r/b
    elif domchan == "g":
        zerotype = (green > dommin)
        firsttype = np.true_divide(green,red)#g/r
        secondtype = np.true_divide(green,blue)#g/b

    zerotype = zerotype.astype(np.int)
    firsttype = (firsttype > first).astype(np.int)# & (firsttype < first[1])
    secondtype = (secondtype > second).astype(np.int)# & (secondtype < second[1])
    combined = cv2.bitwise_and(cv2.bitwise_and(zerotype, secondtype), firsttype)
    combined = cv2.multiply(combined.astype(np.uint8), mult)

    return combined
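
A minimal usage sketch for thresholdColor; the file name and threshold values below are illustrative assumptions, not part of the original example.

# Usage sketch (hypothetical file name and thresholds):
# colattr packs (dominant channel, its minimum value, ratio thresholds
# against the other two channels).
img = cv2.imread("leaves.jpg")            # hypothetical input image
green_attr = ("g", 60, 1.2, 1.2)          # green must exceed red and blue by 20%
binary = thresholdColor(img, green_attr)  # 255 where green dominates, else 0
cv2.imwrite("green_mask.png", binary)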
Example #3
 def updateBiggestObjectContour(self, frame, contour):
     
     if contour is None:
         return
         
     height, width = frame.shape[:2]
     
     c,r,w,h = cv2.boundingRect(contour)
     
     if w*h < 20:
         return
     # set up the ROI for tracking
     roi = frame[r:r+h, c:c+w]
     hsv_roi =  cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
     
     mask2 = cv2.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))
     mask = np.zeros((height,width), np.uint8)
     cv2.drawContours(mask, [contour], 0, 255, -1)
     maskArea = mask[r:r+h,c:c+w]
     maskArea = cv2.bitwise_and(maskArea, mask2)
     img = cv2.bitwise_and(roi,roi,mask=maskArea)
     #cv2.imshow('maskarea', mask2)
     #roi_hist = cv2.calcHist([hsv_roi],[0,1,2],maskArea,[180,256,256],[0,180,0,256,0,256])
     roi_hist = cv2.calcHist([hsv_roi],[0,1],maskArea,[180,256],[0,180,0,256])
     #roi_hist = cv2.calcHist([hsv_roi],[0],maskArea,[180],[0,180])
     cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)
     #print i
     self.biggestObject = (c,r,w,h)
     self.biggestObjectHistogram = roi_hist
Example #4
def apply_mask(img, mask, mask_color, device, debug=False):
  # Apply a white or black image mask to an image, using the bitwise AND, bitwise NOT, and ADD operators
  # img = image object, color(RGB)
  # mask= image object, binary (black background with white object)
  # mask_color= white or black  
  # device = device number. Used to count steps in the pipeline
  # debug= True/False. If True, print image
  device += 1
  if mask_color=='white':
    # Mask image
    masked_img= cv2.bitwise_and(img,img, mask = mask)
    # Create inverted mask for background
    mask_inv=cv2.bitwise_not(mask)
    # Invert the background so that it is white, but apply mask_inv so you don't white out the plant
    white_mask= cv2.bitwise_not(masked_img,mask=mask_inv)
    # Add masked image to white background (can't just use mask_inv because that is a binary)
    white_masked= cv2.add(masked_img, white_mask)
    if debug:
      print_image(white_masked, (str(device) + '_wmasked.png'))
    return device, white_masked
  elif mask_color=='black':
    masked_img= cv2.bitwise_and(img,img, mask = mask)
    if debug:
      print_image(masked_img, (str(device) + '_bmasked.png'))
    return device, masked_img
  else:
    fatal_error('Mask Color ' + str(mask_color) + ' is not "white" or "black"!')
def crop_waffle(img):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    greyscale = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    lower_yellow = np.array([0,50,50])
    upper_yellow = np.array([70,255,255])
    mask = cv2.inRange(hsv, np.uint8(lower_yellow), np.uint8(upper_yellow))
    kernel = np.ones((9,9),np.uint8)
    closed_mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    masked_img = cv2.bitwise_and(greyscale,greyscale,mask = closed_mask)
    contours, hierarchy = cv2.findContours(masked_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    #now find the largest contour (cv2.contourArea accepts numpy arrays directly,
    #so the old cv.fromarray round-trip is no longer needed)
    max_area = 0
    max_contour = None
    for c in contours:
        area = cv2.contourArea(c)
        if area > max_area:
            max_contour = c
            max_area = area
    shape = img.shape
    largest_blob_mask = np.zeros((shape[0],shape[1],1),np.uint8)
    cv2.fillPoly(largest_blob_mask, pts =[max_contour], color=(255,255,255))
    print_rgb_hist(img,largest_blob_mask)
    return cv2.bitwise_and(img,img, mask= largest_blob_mask)
    def detect(self, frame):
        hsv_img = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # split the video frame into color channels
        h, s, v = cv2.split(hsv_img)
        self.channels['hue'] = h
        self.channels['saturation'] = s
        self.channels['value'] = v

        # Threshold ranges of HSV components; storing the results in place
        self.threshold_image("hue")
        self.threshold_image("saturation")
        self.threshold_image("value")

        # Perform an AND on HSV components to identify the laser!
        self.channels['laser'] = cv2.bitwise_and(
            self.channels['hue'],
            self.channels['value']
        )
        self.channels['laser'] = cv2.bitwise_and(
            self.channels['saturation'],
            self.channels['laser']
        )

        # Merge the HSV components back together.
        hsv_image = cv2.merge([
            self.channels['hue'],
            self.channels['saturation'],
            self.channels['value'],
        ])

        self.track(frame, self.channels['laser'])

        return hsv_image
Example #7
    def updateContours(self, frame, contours):

        self.movingObjects = []
        self.histograms = []
        
        height, width = frame.shape[:2]
        
        i=0
        for contour in contours:
            c,r,w,h = cv2.boundingRect(contour)
            if w*h < 20:
                continue
            # set up the ROI for tracking
            roi = frame[r:r+h, c:c+w]
            hsv_roi =  cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
            
            mask2 = cv2.inRange(hsv_roi, np.array((0., 30.,30.)), np.array((180.,250.,250.)))
            mask = np.zeros((height,width), np.uint8)
            cv2.drawContours(mask, [contour], 0, 255, -1)
            maskArea = mask[r:r+h,c:c+w]
            maskArea = cv2.bitwise_and(maskArea, mask2)
            img = cv2.bitwise_and(roi,roi,mask=maskArea)
            #cv2.imshow('maskarea', mask2)
            roi_hist = cv2.calcHist([hsv_roi],[0,1],maskArea,[180,256],[0,180,0,256])
            #roi_hist = cv2.calcHist([hsv_roi],[0],maskArea,[180],[0,180])
            cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)
            #print i
            i+=1
            self.movingObjects.append((c,r,w,h))
            self.histograms.append(roi_hist)
Example #8
def cut_by_mask(mask,image):
    only_left = cv2.bitwise_and(image, image, mask = mask[0,:,:])
    only_right= cv2.bitwise_and(image, image, mask = mask[1,:,:])
    height, width, depth = image.shape
    only_left = crop(only_left, 0, width)
    only_right = crop(only_right,0, width)
    return only_left, only_right
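
The crop helper called above is not included in this snippet; a plausible stand-in matching the call signature crop(img, x0, x1), purely hypothetical, is:

# Hypothetical stand-in for the missing 'crop' helper used above;
# it slices the columns [x0, x1) out of the image and keeps all rows.
def crop(img, x0, x1):
    return img[:, x0:x1]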
def otsuTwo(img, img_file, man_img, mask=None):

    # blur = cv2.GaussianBlur(img, (5,5),0)
    blur = cv2.bilateralFilter(img, 5, 100, 100)

    thresholds = multithresholdOtsu(blur,mask)
    th1 = thresholds[0]
    th2 = thresholds[1]


    if mask is None:
        ret, thresh1 = cv2.threshold(blur,th1,255,cv2.THRESH_BINARY)
        ret, thresh2 = cv2.threshold(blur,th2,255,cv2.THRESH_BINARY_INV)
    else:
        combined_img = cv2.bitwise_and(blur, blur, mask=mask)
        ret, thresh1 = cv2.threshold(combined_img,th1,255,cv2.THRESH_BINARY)
        ret, thresh2 = cv2.threshold(combined_img,th2,255,cv2.THRESH_BINARY_INV)

    out_img_o = cv2.bitwise_and(thresh1, thresh2, mask=None)
    out_info_o = "_otsu_%d-%d" % (th1, th2)
    out_str_o = out_info_o + '.png'
    out_file_o = re.sub(r'\.jpg', out_str_o, img_file)
    cv2.imwrite(out_file_o, out_img_o)
    t = evaluation.findTotals(out_img_o, man_img)
    f = open('o2_all.txt', 'a')
    f.write(img_file + " " + str(t[0]) + " " + str(t[1]) + " " + str(t[2]) + " " + str(t[3]) + "\n")
    f.close()
Example #10
def backproject():
    im = cv2.imread("DSC_0869.JPG")
    #im = cv2.resize(im, None, fx = 0.25, fy = 0.25)

    #image => hsv, hist
    hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    #cv2.imshow("hsv", hsv)
    imHist = cv2.calcHist([hsv], [0,1], None, [180, 256], [0,180,0,256])

    bckP = cv2.calcBackProject([hsv], [0,1], imHist, [0,180,0,256], 1)
    #cv2.imshow("bp", bckP)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
    closing = cv2.morphologyEx(bckP, cv2.MORPH_CLOSE, kernel)
    #cv2.imshow("eroded", closing)

    ##dst = cv2.filter2D(closing, -1, kernel)
    ##cv2.imshow('2d', dst)

    ret, thresh = cv2.threshold(closing, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    #cv2.imshow("thresh", thresh)

    fm1 = cv2.merge((thresh, thresh, thresh))
    res1 = cv2.bitwise_and(im, fm1, mask = None)  # mask here has no significance
    #cv2.imshow("first and", res1)

    #make (lower bound) G= 180 for proper target. G= 90 makes its edges disappear a leeettle
    mask = cv2.inRange(hsv, np.array([5,90,50], dtype = np.uint8), np.array([49,255,205], dtype = np.uint8))
    mask_inv = cv2.bitwise_not(mask)
    res = cv2.bitwise_and(res1, res1, mask = mask_inv)
    cv2.imwrite("final.jpg", res)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
    res = cv2.erode(res, kernel, iterations=1)
    cv2.imwrite("bckP.JPG", res)
    extractblob(res)
Example #11
    def getFilteredImage(self, image):
        """
        * Internal function of run()

        * Filter given image
        * The pixels in laser part have value 255.
        * Otherwise, the pixels have value 0.
        """
        hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

        h, s, v = cv2.split(hsv_image)

        _, tmp = cv2.threshold(h, self.hue_max, 0, cv2.THRESH_TOZERO_INV)
        _, tmp = cv2.threshold(tmp, self.hue_min, 255, cv2.THRESH_BINARY)
        hue_th = cv2.bitwise_not(tmp)

        _, tmp = cv2.threshold(s, self.sat_max, 0, cv2.THRESH_TOZERO_INV)
        _, tmp = cv2.threshold(tmp, self.sat_min, 255, cv2.THRESH_BINARY)
        sat_th = tmp

        _, tmp = cv2.threshold(v, self.val_max, 0, cv2.THRESH_TOZERO_INV)
        _, tmp = cv2.threshold(tmp, self.val_min, 255, cv2.THRESH_BINARY)
        val_th = tmp

        tmp = cv2.bitwise_and (hue_th, sat_th)
        laser_img = cv2.bitwise_and (tmp, val_th)

        return laser_img
Example #12
    def preprocesing(self, image):
        """
        Makes a copy of input image then makes a threasholding operation
        so we can get mask of dominant color areas. Then applies
        that mask to every channel of output image.

        Args:
            image(numpy.ndarray): Image to process

        Returns:
            Image with boosted green channel
        """
        im = image.copy()
        self.main_col = cv.mean(im)[:3]
        c_boost = cv.inRange(image, (0.25*self.main_col[0],
                                     0.25*self.main_col[1],
                                     0.25*self.main_col[2]),
                                    (1.5*self.main_col[0],
                                     2.5*self.main_col[1],
                                     1.5*self.main_col[2]))
        im[:, :, 0] = cv.bitwise_and(image[:, :, 0], c_boost)
        im[:, :, 1] = cv.bitwise_and(image[:, :, 1], c_boost)
        im[:, :, 2] = cv.bitwise_and(image[:, :, 2], c_boost)

        if self.debug:
            cv.imshow("preprocesing", im)
            cv.imwrite("preprocesing.png", im)
        return im
Example #13
def process(imgroi):
    global img

    cv2.imshow("roi",imgroi)
    imgB, imgG, imgR = cv2.split(imgroi) 
    imgHSV = cv2.cvtColor(imgroi, cv2.COLOR_RGB2HSV)
    imgH, imgS, imgV = cv2.split(imgHSV) 
    filteredS = filter_high(imgS,gl_S)
    filteredV = filter_high(imgV,gl_V)
    filteredG = filter_high(imgG,gl_G)
    filteredR = filter_high(imgR,gl_R)
    filteredB = filter_high(imgB,gl_B)
    res = cv2.bitwise_and(filteredS,filteredV)
    res = cv2.bitwise_and(res,filteredG)
    res = cv2.bitwise_and(res,filteredR)
    res = cv2.bitwise_and(res,filteredB)
    res = cv2.blur(res,(5,5))
    res1 = res.copy()
    contours, hierarchy = cv2.findContours(res, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    show_contours(contours)
    
    cv2.imshow("ori",img)
    cv2.imshow("res",res)
    cv2.imshow("res1",res1)
Example #14
 def ComputeDescriptors(self,RGB,Depth,dep_mask,h):
     dep = np.float32(Depth)
     dep_mask =cv2.bitwise_not(dep_mask)
     ret, mask = cv2.threshold(dep, 1.7, 1, cv2.THRESH_BINARY_INV)
     mask = np.uint8(mask)
     ret, mask2 = cv2.threshold(dep, 0.01, 1, cv2.THRESH_BINARY)
     mask2 = np.uint8(mask2)
     mask = cv2.bitwise_and(mask,mask2)
     mask = cv2.bitwise_and(mask,dep_mask)
     masked_data = None  # ensure it exists even when h is False, since it is returned below
     if h:
         masked_data = cv2.bitwise_and(RGB, RGB, mask=mask)
         masked_data = cv2.bitwise_and(masked_data, masked_data, mask=mask2)
         sp = cv2.cvtColor(masked_data, cv2.COLOR_RGB2GRAY)
         sp = cv2.GaussianBlur(sp, (5, 5),10)
         fd, imn = hog(dep, self.orientations, self.pixels_per_cell, self.cells_per_block,
                       self.visualize, self.normalize)
         if self.HogDepth:
             fdn,im = hog(sp, self.orientations, self.pixels_per_cell, self.cells_per_block,
                   self.visualize, self.normalize)
             fd = np.concatenate((fd, fdn))
     else:
         fd = []
     fgrid = np.array([])
     for i in range(4):  # xrange is Python 2 only
         for j in range(4):
             sub = RGB[25*i:25*(i+1),25*j:25*(j+1)]
             sub_mask = mask[25*i:25*(i+1),25*j:25*(j+1)]
             fsub = self.ComputeHC(sub,sub_mask)
             fgrid = np.concatenate((fgrid,fsub))
     fd2 = fgrid.copy()
     return fd,fd2,masked_data
Example #15
    def segmentate(self):
        self.reset()
        self.scale(2.0)
        self._img_orig = img_orig = self.img.copy()
        skew = self.skew(230, 255)
        if skew is None:
            print('Retry')
            skew = self.skew(20, 100)

        self.reset()
        #self.scale(2.0, cv2.INTER_NEAREST)
        self.scale(2.0, cv2.INTER_CUBIC)
        self.rotate(skew)

        self._closed = closed = self.threshold(self.gray(self.hsv_levels(0, 172, 0.21, level=2)))
        self._gray = gray = self.hsv_threshold()

        # levels1: 25-200
        # levels2: 0-106

        self._mask_and = mask_and = cv2.bitwise_and(255-gray, 255-gray, mask=closed)

        img_scale = cv2.bitwise_and(self.img, self.img, mask=closed)
        closed[:3] = 0
        closed[-3:] = 0
        closed[:,:3] = 0
        closed[:,-3:] = 0

        self._cnts, self._img_dbg = Grandbux.detect_contours(img_scale, closed)
Example #16
    def get_hsv(self, frame):
        """
        Tracking method 2.

        :param frame: frame to threshold it's hsv values.
        :return: hsv image, and image withing the threshold.
        """
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        hue, sat, val = cv2.split(hsv)

        (t, tmp) = cv2.threshold(hue, self.max_hue, 0, cv2.THRESH_TOZERO_INV)
        (t, hue) = cv2.threshold(tmp, self.min_hue, 255, cv2.THRESH_BINARY, hue)
        hue = cv2.bitwise_not(hue)

        (t, tmp) = cv2.threshold(sat, self.max_sat, 0, cv2.THRESH_TOZERO_INV)
        (t, sat) = cv2.threshold(tmp, self.min_sat, 255, cv2.THRESH_BINARY, sat)

        (t, tmp) = cv2.threshold(val, self.max_val, 0, cv2.THRESH_TOZERO_INV)
        (t, val) = cv2.threshold(tmp, self.min_val, 255, cv2.THRESH_BINARY, val)

        if self.debug:
            cv2.imshow('sat', sat)
            cv2.imshow('hue', hue)
            cv2.imshow('val', val)

        laser = cv2.bitwise_and(hue, val)
        laser = cv2.bitwise_and(sat, laser, laser)

        hsv = cv2.merge([hue, sat, val])

        return hsv, laser
Example #17
def mergeImages(images, transforms):
    # Calculate the size of the final merged image
    minP, maxP = ( np.array([0, 0]), np.array([0, 0]) )
    for trans in transforms:
        width, height = images[trans.getSourceIndex()].width, images[trans.getSourceIndex()].height
        for point in ([0, 0], [0, height], [width, 0], [width, height]):
            current = trans.transform(point)
            for j in (0, 1):
                if current[j] < minP[j]:
                    minP[j] = current[j]
                if current[j] > maxP[j]:
                    maxP[j] = current[j]
    if minP[0] < 0 or minP[1] < 0:
        delta = -minP.clip(None, 0)
        for trans in transforms:
            trans.addDelta( delta )
    size = [ maxP[0] - minP[0], maxP[1] - minP[1] ]
    canvas = np.zeros( ( size[1], size[0], 3), np.uint8 )
    for i in range( 0, len(images) ):
        print "\tMerging image " + str(i)
        warped = cv2.warpPerspective( images[i].imgData, transforms[i].mat, ( size[0], size[1] ) )
        ret, mask = cv2.threshold( cv2.cvtColor( warped, cv2.COLOR_BGR2GRAY ), 0, 255, cv2.THRESH_BINARY )
        canvas_bg = cv2.bitwise_and( canvas, canvas, mask = cv2.bitwise_not(mask) )
        warped_fg = cv2.bitwise_and( warped, warped, mask = mask )
        cv2.add(canvas_bg, warped_fg, canvas)
    return canvas
Example #18
def filter_colors(frame=None, show_images=False, verbose=False):
    if frame is None:
        return False
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    fd = filters()
    lowers = fd["Lower"]
    uppers = fd["Upper"]
    color_filters = fd["Filters"]

    masks = {}
    results = {}
    medians = {}
    smoothed = {}
    kernel = np.ones((15,15), np.float32) / 225

    for color in color_filters:
        lower_color = np.array(lowers[color])
        upper_color = np.array(uppers[color])
        masks[color] = cv2.inRange(hsv, lower_color, upper_color)
        # mask_img was never defined, so the NameError fallback always ran;
        # apply the color mask to the frame directly
        results[color] = cv2.bitwise_and(frame, frame, mask=masks[color])
        medians[color] = cv2.medianBlur(results[color], 15)
        smoothed[color] = cv2.filter2D(results[color], -1, kernel)

    edges = cv2.Canny(frame, 80, 200)

    all_images = {"Frame": frame, "Masks": masks, "Results": results, "Colors": color_filters, "Edges": edges, "Median Blur": medians, "Smooth Blur": smoothed }

    if show_images:
        display_images(all_images=all_images)
    return all_images
Example #19
 def grid_contours(self, frame, contours, heir):
     result_cont, result_heir, result_rois = [], [], []
     logger.debug('segmenting contours with grid')
     for contour in contours:
         msk = numpy.zeros(frame.shape, dtype=frame.dtype)
         cv2.drawContours(msk, [contour], -1, 255, -1)
         bbox = cv2.boundingRect(contour)
         w0, h0 = bbox[0]//self.width, bbox[1]//self.width
         n_w = max(1, ((bbox[0]+bbox[2])//self.width) - w0)
         n_h = max(1, ((bbox[1]+bbox[3])//self.width) - h0)
         for i in range(n_w):
             for j in range(n_h):
                 grid_msk = numpy.zeros(frame.shape, dtype=frame.dtype)
                 grid_box = numpy.array([[(w0+i)*self.width, (h0+j)*self.width],
                                         [(w0+i+1)*self.width, (h0+j)*self.width],
                                         [(w0+i)*self.width, (h0+j+1)*self.width],
                                         [(w0+i+1)*self.width, (h0+j+1)*self.width]],
                                        dtype=numpy.int32)  # int32: pixel coordinates can exceed 255
                 cv2.drawContours(grid_msk, [grid_box], -1, 255, -1)
                 grid_msk = cv2.bitwise_and(grid_msk, msk)
                 result_cont.append(grid_msk)
                 # todo: work out stats of new contour!
     # todo: mix grid with contours to form super pixels!
     contours, result_cont = result_cont, []  # iterate the grid masks, collecting deduplicated masks into a fresh list
     logger.debug('checking and removing overlap...')
     msk_all = numpy.zeros(frame.shape[:2], dtype=frame.dtype)
     for msk in contours:
         msk = cv2.bitwise_and(msk, cv2.bitwise_not(msk_all))
         if msk.sum() != 0:
             result_cont.append(msk)
             msk_all = numpy.minimum(255, cv2.add(msk_all, msk))
     logger.debug('grid contours complete')
     contours = result_cont
     return contours, heir, result_rois
Example #20
def hsv_threshold(img, hue_min, hue_max, sat_min, sat_max, val_min, val_max):
    """
    Threshold an HSV image given separate min/max values for each channel.
    :param img: an hsv image
    :param hue_min:
    :param hue_max:
    :param sat_min:
    :param sat_max:
    :param val_min:
    :param val_max:
    :return: result of the threshold (each binary channel AND'ed together)
    """

    hue, sat, val = cv2.split(img)

    hue_bin = np.zeros(hue.shape, dtype=np.uint8)
    sat_bin = np.zeros(sat.shape, dtype=np.uint8)
    val_bin = np.zeros(val.shape, dtype=np.uint8)

    cv2.inRange(hue, hue_min, hue_max, hue_bin)
    cv2.inRange(sat, sat_min, sat_max, sat_bin)
    cv2.inRange(val, val_min, val_max, val_bin)

    bin = np.copy(hue_bin)
    cv2.bitwise_and(sat_bin, bin, bin)
    cv2.bitwise_and(val_bin, bin, bin)

    return bin
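
A short usage sketch for hsv_threshold; the file name and threshold values are assumptions for illustration only.

# Usage sketch (hypothetical file name and thresholds): isolate bright,
# saturated, greenish pixels.
bgr = cv2.imread("frame.png")
hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
target = hsv_threshold(hsv, 40, 80, 100, 255, 100, 255)
cv2.imshow("target", target)
cv2.waitKey(0)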
Example #21
File: wrp.py Project: noodlebreak/Warper
    def show_final(self, src1, src2):
        """
        Function to add main image and transformed logo image and show final output.
        Icon image replaces the pixels of main image in this implementation.
        """

        self.gray = cv2.cvtColor(src2.astype('float32'), cv2.COLOR_BGR2GRAY)
        retval, self.gray = cv2.threshold(self.gray, 0, 255, cv2.THRESH_BINARY)
        # adaptiveThreshold(gray,gray,255,ADAPTIVE_THRESH_MEAN_C,THRESH_BINARY,5,4);
        self.gray_inv = cv2.bitwise_not(self.gray.astype('uint8'))
        print("gray shape: {} gray_inv shape: {} \n".format(self.gray.shape, self.gray_inv.shape))

        # Works with bool masks
        self.src1final = cv2.bitwise_and(src1, src1, dst=self.src1final, mask=self.gray_inv)
        self.src2final = cv2.bitwise_and(src2, src2, mask=self.gray.astype('uint8'))
        self.src2final = self.src2final.astype('uint8')

        # final_image = cv2.add(src1final, src2final)
        self.final_image = self.src1final + self.src2final
        self.save_transitions(src1, src2)

        cv2.imshow("output", self.final_image)
        #  Press "Escape button" to exit
        while True:
            key = cv2.waitKey(10) & 0xff
            if key == 27:
                break
        sys.exit(0)
def segmetacion_03(img_nir, img_ndvi): # with k-means

    #d_blue = img_nir[:,:,1]
    d_nir = img_nir[:,:,2]
    veg_mask = s.segOtsu(img_ndvi)

    # masks from the NIR and NDVI images
    veg_zones_ndvi = cv.bitwise_and(img_ndvi, img_ndvi, mask=veg_mask)
    veg_zones_nir = cv.bitwise_and(d_nir, d_nir, mask=veg_mask)
    veg_zones_nir_f32 = np.float32(veg_zones_nir)
    veg_zones_nir_f32 = veg_zones_nir_f32 / 255
    k = 4
    max_iter = 5
    eps = .01

    rows, cols = veg_zones_ndvi.shape[:2]
    p = np.zeros((cols*rows, 2))
    p[:,0] = veg_zones_ndvi.reshape((cols*rows, 1))[:,0]
    p[:,1] = veg_zones_nir_f32.reshape((cols*rows, 1))[:,0]
    p = np.float32(p)
    x = s.segmetarByKMeans(img_ndvi, p, k, max_iter, eps)
    cv.imshow('kmeans', x)
    cv.waitKey()

    return 0
Example #23
def add_img():
    img1 = cv2.imread('img3.png')
    img2 = cv2.imread(r"C:\Users\Goutham\Documents\GitHub\HackerRank\python\openCV\images_1.jpg")

    rows, cols, channels = img2.shape
    roi = img1[0:rows, 0:cols ]

    # Now create a mask of logo and create its inverse mask
    img2gray = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)

    # add a threshold
    ret, mask = cv2.threshold(img2gray, 220, 255, cv2.THRESH_BINARY_INV)

    mask_inv = cv2.bitwise_not(mask)

    # Now black-out the area of logo in ROI
    img1_bg = cv2.bitwise_and(roi,roi,mask = mask_inv)

    # Take only region of logo from logo image.
    img2_fg = cv2.bitwise_and(img2,img2,mask = mask)

    dst = cv2.add(img1_bg,img2_fg)
    img1[0:rows, 0:cols ] = dst

    cv2.imshow('add',img1)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def remove_abundant(image, mask):
    edges = cv2.bitwise_and(image, image, mask=mask)
    red_mean = np.average(edges[:,:,0], weights=edges[:,:,0].astype(bool))
    green_mean = np.average(edges[:,:,1], weights=edges[:,:,1].astype(bool))
    blue_mean = np.average(edges[:,:,2], weights=edges[:,:,2].astype(bool))
    im_temp = copy.copy(image)
    im = np.zeros(im_temp.shape, dtype='int')
    im[:,:,0] = abs(im_temp[:,:,0] - red_mean)
    im[:,:,1] = abs(im_temp[:,:,1] - green_mean)
    im[:,:,2] = abs(im_temp[:,:,2] - blue_mean)
    im = im.astype('uint8')
    im = cv2.bitwise_and(im, im, mask=mask)
    red_mean = np.average(im[:,:,0], weights=im[:,:,0].astype(bool))
    green_mean = np.average(im[:,:,1], weights=im[:,:,1].astype(bool))
    blue_mean = np.average(im[:,:,2], weights=im[:,:,2].astype(bool))
    ret1, grad0 = cv2.threshold(im[:,:,0], red_mean, 255, cv2.THRESH_BINARY)
    ret1, grad1 = cv2.threshold(im[:,:,1], green_mean, 255, cv2.THRESH_BINARY)
    ret1, grad2 = cv2.threshold(im[:,:,2], blue_mean, 255, cv2.THRESH_BINARY)
    grad = cv2.bitwise_and(grad0, grad1)
    grad = cv2.bitwise_and(grad, grad2)
    grad = morphology.binary_closing(grad, iterations=5)
    grad = morphology.binary_fill_holes(grad)
    grad = grad.astype('uint8')
    im = cv2.bitwise_and(image, image, mask=grad)
    return (im, grad)
def remove_outliers(image, mask):
    # take the masked part of the image to check for the presence of a bee
    im = cv2.bitwise_and(image, image, mask=mask)
    ldp_image, _, _ = ldp.ldp(im)
    test_Y = ldp_image.reshape((ldp_image.shape[0] * ldp_image.shape[1], ldp_image.shape[2]))
    test_rgb = im.reshape((im.shape[0] * im.shape[1], im.shape[2]))
    test = np.concatenate((test_Y, test_rgb), axis=1)
    mask_not = cv2.bitwise_not(mask)
    ret1, mask_not = cv2.threshold(mask_not, np.mean(mask_not), 255, cv2.THRESH_BINARY)
    im = cv2.bitwise_and(image, image, mask=mask_not)
    ldp_image, _, _ = ldp.ldp(im)
    data_ldp = ldp_image.reshape((ldp_image.shape[0] * ldp_image.shape[1], ldp_image.shape[2]))
    data_rgb = im.reshape((im.shape[0] * im.shape[1], im.shape[2]))
    data = np.concatenate((data_rgb, data_ldp), axis=1)
    data = data[np.any(data != 0, axis=1)]
    print(data.shape)
    data = data.astype('float64')
    data = preprocessing.normalize(data, axis=0)
    ss = StandardScaler()
    data = ss.fit_transform(data)
    clf = svm.OneClassSVM(nu=0.8, kernel="rbf", gamma=0.1)
    clf.fit(data)
    test = test.astype('float64')
    test = preprocessing.normalize(test, axis=0)
    print(test.shape)
    test = ss.fit_transform(test)
    test = clf.predict(test)
    test = test.reshape((image.shape[0], image.shape[1]))
    test[test == -1] = 0
    test[test == 1] = 255
    test = test.astype('uint8')
    im = cv2.bitwise_and(image, image, mask=test)
    im = cv2.bitwise_and(im, im, mask=mask)
    #print(test[:,0], test[:,1])
    return (im, test)
Example #26
def overlay(cyclist, scene, alpha=0):

    # compute image dimensions
    cheight, cwidth = cyclist.shape[:2]
    sheight, swidth = scene.shape[:2]

    # limit random placement to box
    i = np.random.randint((sheight - cheight) // 2) + ((sheight - cheight) // 4)
    j = np.random.randint((swidth - cwidth) // 2) + ((swidth - cwidth) // 4)

    roi = scene[i:i + cheight, j:j + cwidth]

    cyclist_gray = cv2.cvtColor(cyclist, cv2.COLOR_BGR2GRAY)
    # ret, mask = cv2.threshold(cyclist_gray, 253, 255, cv2.THRESH_BINARY_INV)
    ret, mask = cv2.threshold(cyclist_gray, 0, 255, cv2.THRESH_BINARY)
    mask_inv = cv2.bitwise_not(mask)

    scene_bg = cv2.bitwise_and(roi, roi, mask = mask_inv)
    cyclist_fg = cv2.bitwise_and(cyclist, cyclist, mask = mask)

    dst = cv2.add(scene_bg, cyclist_fg)

    img = scene.copy()
    img[i:i + cheight, j:j + cwidth] = dst

    return img
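
A brief usage sketch for overlay; the file names are hypothetical, and the cyclist image must be smaller than the scene for the random placement to fit.

# Usage sketch (hypothetical file names):
cyclist = cv2.imread("cyclist.png")
scene = cv2.imread("scene.jpg")
composite = overlay(cyclist, scene)
cv2.imwrite("composite.jpg", composite)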
Example #27
def pegar_logo_en_imagen():
    # Load the image and the logo
    img = cv2.imread('RSCN1018.JPG')
    logo = cv2.imread('wiki_logo_p.JPG')

    # I want to put the logo on the top-left corner, so I create a ROI
    rows,cols,channels = logo.shape
    roi = img[0:rows, 0:cols ]

    # Now create a mask of the logo and also create its inverse mask
    logogray = cv2.cvtColor(logo,cv2.COLOR_BGR2GRAY) # grayscale
    ret, mask = cv2.threshold(logogray, 10, 255, cv2.THRESH_BINARY) # sets values <= 10 to 0 and higher values to 255
    mask_inv = cv2.bitwise_not(mask) # inverts the values (0->255 and 255->0)

    # mask holds the logo, white in the drawing and black in the background
    # mask_inv is the reverse of mask

    # This blacks out the logo area inside the ROI
    img1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)

    # This leaves only the logo part in img2_fg
    img2_fg = cv2.bitwise_and(logo, logo, mask=mask)

    # Put the logo in the ROI
    dst = cv2.add(img1_bg, img2_fg)

    # Modify the main image
    img[0:rows, 0:cols ] = dst

    cv2.imshow('res',img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #28
    def skin_mask(self, img, det_face_hsv, face_rect):
        """
        Create a mask of the image which returns a binary image (black and white) based
        on whether we think a section is skin or not. We do this by analyzing the hue and
        saturation from the detected face. From this we can calculate the probability of
        any pixel in the full image occurring in the face image. Then we can filter out
        any values whose probability is below a certain threshold.

        :param img: BGR image from webcam
        :param det_face_hsv: hsv image of the face from the previous detection
        :param face_rect: non-normalized dimensions of face rectangle (left, top, cols, rows)
        :return: 2D array, black and white if pixels are thought to be skin
        """
        #Get the HSV images of the whole thing and the face
        img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        face_left = face_rect[0]
        face_top = face_rect[1]
        face_width = face_rect[2]
        face_height = face_rect[3]
        #create a Hue-Saturation histogram of the face
        hs_face_hist = cv2.calcHist([det_face_hsv], [0,1], None, [32,32], [0, 180,0, 255])
        cv2.normalize(hs_face_hist, hs_face_hist, 0, 255, cv2.NORM_MINMAX)
        #create a Hue-Saturation BackProjection, and a mask
        #This mask ignores dark pixels < 32, and saturated pixels, <60
        hue_min, sat_min, val_min = 0.0, 32.0, 16.0
        mask = cv2.inRange(img_hsv, np.array((hue_min, sat_min, val_min)), np.array((180., 255., 255.)))
        mask_face = mask[face_top:face_top+face_height, face_left:face_left+face_width]
        masked_hs_hist = cv2.calcHist([det_face_hsv], [0,1], mask_face, [32,32], [0, 180,0, 255])
        cv2.normalize(masked_hs_hist, masked_hs_hist, 0, 255, cv2.NORM_MINMAX)
        masked_hs_prob = cv2.calcBackProject([img_hsv], [0,1], masked_hs_hist, [0, 180,0, 255],1)
        cv2.bitwise_and(masked_hs_prob, mask, dst=masked_hs_prob) #seems to lessen noise???
        thresh = 8.0 #threshold likelihood for being skin, changes a lot based on setting
        _, masked_img = cv2.threshold(masked_hs_prob, thresh, 255, cv2.THRESH_BINARY) #throw out below thresh

        return masked_img
Example #29
def doCapture(name_video, name_class, name_lesson, period):

	if not os.path.isdir("./" + name_class + "/"):
		os.mkdir("./" + name_class + "/")
	if not os.path.isdir("./" + name_class + "/" + name_lesson + "/"):
		os.mkdir("./" + name_class + "/" + name_lesson + "/")

	camera = cv2.VideoCapture(name_video)
	_, frame1 = camera.read()
	_, frame2 = camera.read()
	temp = cv2.bitwise_xor(frame1, frame1)
	
	cc = list()
	picCount = 0
	framecount = 0
	
	while True:
		frame1 = frame2
		for i in range(frm):
			_, frame2 = camera.read()

		if not _:
			break
		
		gray1 = gray(frame1)
		gray2 = gray(frame2)
		dframe = cv2.absdiff(gray1, gray2)
		
		(_, mask) = cv2.threshold(dframe, 5, 255, cv2.THRESH_BINARY)
		mask = cv2.dilate(mask, None, iterations = 6)

		contours, hierarchy = cv2.findContours(mask, 1, 2)

		cc.append(contours)
		if len(cc) > 5: cc.pop(0)
		
		for contours in cc:
			for c in contours:
				x, y, w, h = cv2.boundingRect(c)
				cv2.rectangle(mask, (x, y), (x + w, y + h), 255, -1)
		
		new = cv2.bitwise_and(frame2, frame2, mask = cv2.bitwise_not(mask))
		old = cv2.bitwise_and(temp, temp, mask = mask)
		temp = cv2.add(new, old)
		
		cv2.imshow('original video', frame1)
		cv2.imshow('delta frame', dframe)
		cv2.imshow('mask', mask)
		cv2.imshow('record', temp)

		framecount = framecount + 1
		if framecount == (fps*period/frm):
			cv2.imwrite((name_class+'/'+name_lesson+'/'+'save{0}.jpg').format(picCount), temp)
			framecount = 0
			picCount = picCount + 1
			print('Captured...')
			
		cv2.waitKey(1)
		
	camera.release()
Example #30
def find_fitting_contour(image_name):
    image_name_path = path + image_name

    # path = 'img_test/'
    # img_shark_path = path + 'rocket.jpg'
    img = cv2.imread(image_name_path)
    img = cv2.resize(img, (500, 500))
    shape = list(img.shape)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # 105
    sensitivity = 255
    lower_white = np.array([0, 0, 255 - sensitivity])
    upper_white = np.array([255, sensitivity, 255])
    mask = cv2.inRange(hsv, lower_white, upper_white)
    img = cv2.bitwise_and(img, img, mask=mask)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    se = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
    img = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, se)
    img = cv2.bitwise_and(img, img, mask=mask)

    generate_image_contour(img, image_name)

    points = []

    for i in range(shape[0]):
        for j in range(shape[1]):
            # print(img[i,j])
            if img[i, j] > 100:
                # print(i, j)
                points.append((j, i))
    return points
Example #31
    x3 = contours[2][0][0]
    y3 = contours[2][0][1]
    x4 = contours[3][0][0]
    y4 = contours[3][0][1]
    print("HII")
    print((x1, y1))
    print((x2, y2))
    print((x3, y3))
    print((x4, y4))

    mask = np.zeros(resized.shape, dtype=np.uint8)
    roi_corners = np.array([[(x1, y1), (x2, y2), (x3, y3), (x4,y4)]], dtype=np.int32)
    channel_count = resized.shape[2]
    ignore_mask_color = (255,) * channel_count
    cv2.fillPoly(mask, roi_corners, ignore_mask_color)
    masked_image = cv2.bitwise_and(resized, mask)

    pts1 = np.float32([(x3, y3), (x2, y2), (x4, y4), (x1,y1)])
    pts2 = np.float32([[0, 0], [500, 0], [0, 500], [500, 500]])
    M = cv2.getPerspectiveTransform(pts1, pts2)
    dst = cv2.warpPerspective(masked_image, M, (500, 500))
    dstblur = cv2.GaussianBlur(dst, (5, 5), 0)
    dst = cv2.addWeighted(dstblur,1.5,dst,-0.5,0)
    print(dst.shape)
    cv2.imshow("crop",dst)

    if ret == True:
        # writeframe = cv2.flip(dst, 0)
        out.write(dst)

    cv2.imshow('frame1', blurred)
Example #32
def processVideo(
    video_file,
    width,
    height,
    length,
    demo=False
) -> List[List[Tuple[Tuple[int, int], Tuple[int, int], Tuple[int, int], Tuple[
        int, int]]]]:
    """
    Get non-background bounding boxes every frame by GMG algorithm.
    Some initial filtering is used to filter boxes too small, (smaller than 100 square pixels)

    in the meantime, both background and contours are extracted and cached for later use.

    :return: boxes
    """

    allContours = []

    allBoxes, caching = retrieveComputed()
    if allBoxes is None or demo:
        allBoxes = []
        caching = True
    else:
        return allBoxes

    assert os.path.isfile(video_file), "video file not found"

    video = cv2.VideoCapture(video_file)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    fgbg = cv2.bgsegm.createBackgroundSubtractorGMG()

    width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    length = int(video.get(cv2.CAP_PROP_FRAME_COUNT))

    demoFrame = np.full((height * 2, width * 2, 3), 255, dtype=np.uint8)

    boxImages = dict()

    backgroundFrequencies = [[dict() for _ in range(width)]
                             for _ in range(height)]
    background = np.full((height, width, 3), 255, dtype=np.uint8)
    counter = 0

    sampling_min = int(0.136 * length)
    sampling_max = int(0.2 * length)

    while 1:
        ret, frame = video.read()

        print("Video processing progress: %d\r" %
              ((counter + 1) * 100 / length),
              end="")

        if ret:
            frameBoxes = []
            frameContours = dict()

            fgmask = fgbg.apply(frame)
            fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
            _, th1 = cv2.threshold(fgmask, 127, 255, cv2.THRESH_BINARY)
            a, contours, *_ = cv2.findContours(
                th1, cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_SIMPLE)  # showing the masked result
            # if counter // 50 == 0:
            if sampling_min <= counter <= sampling_max:

                bgmask = np.logical_not(fgmask)

                # cv2.waitKey()
                bg = cv2.bitwise_and(frame,
                                     frame,
                                     mask=bgmask.astype(np.uint8))
                if demo:
                    demoFrame = createCenteredDemoFrame(
                        bg,
                        "Processing Video: %d%%" %
                        ((counter + 1) * 100 / length),
                        "sampling background",
                    )

                for r in range(height):
                    for c in range(width):
                        if bgmask[r][c]:
                            p = tuple(bg[r][c])
                            k = backgroundFrequencies[r][c]
                            if p in k:
                                k[p] += 1
                            else:
                                k[p] = 1
            elif demo:
                showingFrame = np.full((height, width, 3), 255, dtype=np.uint8)

                demoFrame = createCenteredDemoFrame(
                    showingFrame,
                    "Processing Video: %d%%" % ((counter + 1) * 100 / length),
                    "",
                )
            if demo:
                cv2.imshow("demo", demoFrame)
                DEMO_FRAMES.append(demoFrame)
                cv2.waitKey(1)

            for i in range(len(contours)):
                if len(contours[i]) >= 5:

                    # getting the 4 points of the rectangle

                    x, y, w, h = cv2.boundingRect(contours[i])
                    if w * h >= 100:
                        # upper-left upper-right lower-left lower-right
                        box = ((x, y), (x + w, y), (x, y + h), (x + w, y + h))
                        frameBoxes.append(box)
                        boxImages[(counter, ) + box] = frame[y:y + h, x:x + w]
                        frameContours[box] = contours[i]
            allContours.append(frameContours)
            allBoxes.append(frameBoxes)

        else:
            break
        counter += 1
    print("Video processing progress: 100")

    video.release()

    for r in range(height):
        for c in range(width):
            px = tuple(backgroundFrequencies[r][c].items())
            if px:
                maxP = max(tuple(backgroundFrequencies[r][c].items()),
                           key=lambda x: x[1])[0]
            else:
                maxP = (255, 255, 255)
            background[r][c] = maxP

    if caching:
        if not os.path.exists("cached"):
            os.makedirs("cached")
        filename = os.path.join("cached", "trafficVideoBoxes.pickle")

        contoursFilename = os.path.join("cached", "contours.pickle")
        backgroundFilename = os.path.join("cached", "background.jpg")
        boxImagesFilename = os.path.join("cached", "boxImages.pickle")

        with open(filename, "wb") as file:
            pickle.dump(allBoxes, file)

        with open(contoursFilename, "wb") as file:
            pickle.dump(allContours, file)

        with open(boxImagesFilename, "wb") as file:
            pickle.dump(boxImages, file)

        # print(background[0:10, 0:10])
        cv2.imwrite(backgroundFilename, background)

        print(
            "bounding boxes, contours, extracted background cached for later use"
        )

        print("video frame count: %d" % length)

    return allBoxes
Example #33
import cv2
import numpy as np

img = cv2.imread("C:\\Users\\kernel\\Pictures\\Saved Pictures\\meme.jpg")

square = np.zeros((300, 300), np.uint8)
cv2.rectangle(square, (50, 50), (250, 250), 255, -1)
cv2.imshow("Square", square)

ellipse = np.zeros((300, 300), np.uint8)
cv2.ellipse(ellipse, (150, 150), (150, 50), 45, 0, 360, 255, -1)
cv2.ellipse(ellipse, (150, 150), (150, 50), 135, 0, 360, 255, -1)
cv2.imshow("Ellipse", ellipse)

anded = cv2.bitwise_and(square, ellipse)
cv2.imshow("anded", anded)

ored = cv2.bitwise_or(square, ellipse)
cv2.imshow("ored", ored)

noted = cv2.bitwise_not(ellipse)
cv2.imshow("noted", noted)

xored = cv2.bitwise_xor(square, ellipse)
cv2.imshow("xored", xored)

cv2.waitKey(20000)
cv2.destroyAllWindows()
Example #34
import cv2
import numpy as np

img1 = cv2.imread('messi4.jpg')
img2 = cv2.imread('opencv.png')

row, column, dim = img2.shape
roi = img1[0:row, 0:column]

img2gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(img2gray, 155, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)

img1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
img2_fg = cv2.bitwise_and(img2, img2, mask=mask)

dst = cv2.add(img1_bg, img2_fg)
img1[0:row, 0:column] = dst

cv2.imshow('output', img1)

cv2.waitKey(0)
cv2.destroyAllWindows()
Example #35
    image = cv2.resize(image, (0, 0),
                       fx=0.5,
                       fy=0.5,
                       interpolation=cv2.INTER_LINEAR)
    proc = cv2.medianBlur(image, 7)

    ### Detect skin
    image_hsv = cv2.cvtColor(proc, cv2.COLOR_BGR2HSV)
    skin_like_mask = cv2.inRange(image_hsv, skin_range_min, skin_range_max)
    # Filter the skin mask :
    skin_mask = sieve(skin_like_mask, skin_sieve_min_size)
    kernel = np.ones((skin_kernel_size, skin_kernel_size), dtype=np.int8)
    skin_mask = cv2.morphologyEx(skin_mask, cv2.MORPH_CLOSE, kernel)
    # Apply skin mask
    skin_segm_rgb = cv2.bitwise_and(image, image, mask=skin_mask)

    ### Contours
    proc = cv2.Canny(proc, wheel_canny_threshold,
                     wheel_canny_ratio * wheel_canny_threshold)
    cv2.imshow("Proc", proc)

    contours = proc
    contours_rgb = np.zeros(contours.shape + (3, ), dtype=np.uint8)
    contours_rgb[:, :, 0] = 0
    contours_rgb[:, :, 1] = contours
    contours_rgb[:, :, 2] = 0

    ## Resulting image:
    result = skin_segm_rgb + contours_rgb
import cv2
import numpy as np

# Load image, grayscale, Otsu's threshold
image = cv2.imread('1.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]

# Perform morphological hit or miss operation
kernel = np.array([[-1,-1,-1], [-1,1,-1], [-1,-1,-1]])
dot_mask = cv2.filter2D(thresh, -1, kernel)

# Bitwise-xor mask with binary image to remove dots
result = cv2.bitwise_xor(thresh, dot_mask)

# Dilate to fix damaged text pixels
# since the text quality has decreased from thresholding
# then bitwise-and with input image
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2,2))
dilate = cv2.dilate(result, kernel, iterations=1)
result = cv2.bitwise_and(image, image, mask=dilate)
result[dilate==0] = [255,255,255]

cv2.imshow('dot_mask', dot_mask)
cv2.imshow('thresh', thresh)
cv2.imshow('result', result)
cv2.imshow('dilate', dilate)
cv2.waitKey()
Example #37
import cv2

# Reading image
img = cv2.imread("img_path")

# Edges
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.medianBlur(gray, 5)
edges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 9)

# sketch
color = cv2.bilateralFilter(img, 9, 250, 250)
cartoon = cv2.bitwise_and(color, color, mask=edges)

cv2.imshow("Image", img)
cv2.imshow("edges", edges)
cv2.imshow("Cartoon", cartoon)
cv2.waitKey(0)
cv2.destroyAllWindows()
Example #38
    GC_PR_FGD means a probable foreground pixel, value 3
    rect is the ROI region
    bgdModel is the temporary background model array
    fgdModel is the temporary foreground model array
    iterCount is the number of iterations of the graph-cut algorithm
    mode: use GC_INIT_WITH_RECT when the ROI is supplied by the user

    Further reading:
        Official tutorial: Interactive Foreground Extraction using GrabCut Algorithm: https://docs.opencv.org/4.5.3/d8/d83/tutorial_py_grabcut.html
'''

import cv2 as cv
import numpy as np

img = cv.imread("../data/images/master.jpg")
rect = cv.selectROI("select roi", img, showCrosshair=True) # interactively select the target region to segment

# initialize the mask and the fore/background models; note the dtypes here
bgdModel = np.zeros((1, 65), dtype=np.float64) # float64
fgdModel = np.zeros((1, 65), dtype=np.float64)
mask = np.zeros(img.shape[:2], dtype=np.uint8)

# GrabCut segmentation
mask, bgdModel, fgdModel = cv.grabCut(img, mask, rect, bgdModel, fgdModel, 6, cv.GC_INIT_WITH_RECT)
fg_mask = np.where((mask == 1) + (mask == 3), 255, 0).astype(np.uint8)

# extract the segmented foreground object
result = cv.bitwise_and(img, img, mask=fg_mask)
cv.imshow("result", result)
cv.waitKey(0)
cv.destroyAllWindows()
    xframe = len(frame[0])
    yframe = len(frame)
    ycnt = [0,0,0,0]
    xcnt = [0,0,0,0]
    cnt = [0,0,0,0]
    radius = [0,0,0,0]
    center = [0,0,0,0]
    cnter = [0,0,0,0]
    shib = 0

    cropped = [frame[int(yframe/4):int(yframe-yframe/4), 0:int(xframe/4)] , frame[int(yframe/4):int(yframe-yframe/4), int((xframe/4)*3):xframe] , frame[0:int(yframe/4), 0:xframe] , frame[int((yframe/4)*3):yframe, 0:xframe] ]
    hsvfr = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
    hsvbluredfr = cv.GaussianBlur(hsvfr, (5, 5), 0)
    binaryfr = cv.inRange(hsvbluredfr, minf, maxf)
    medianedfr = cv.medianBlur(binaryfr, 25)
    filteredfr = cv.bitwise_and(frame,frame, mask= medianedfr)
    im2fr, contoursfr, hierarchyfr = cv.findContours(medianedfr,cv.RETR_TREE,cv.CHAIN_APPROX_SIMPLE)

    for d in range(0,4):
        hsv[d] = cv.cvtColor(cropped[d], cv.COLOR_BGR2HSV)
        hsvblured[d] = cv.GaussianBlur(hsv[d], (5, 5), 0)
        binary[d] = cv.inRange(hsvblured[d], minf, maxf)
        medianed[d] = cv.medianBlur(binary[d], 25)
        im2[d], contours[d], hierarchy[d] = cv.findContours(medianed[d],cv.RETR_TREE,cv.CHAIN_APPROX_SIMPLE)  

    try:  
        if len(contoursfr) > 0:
            cntfr = max(contoursfr, key = cv.contourArea)
            (xfr,yfr), radiusfr = cv.minEnclosingCircle(cntfr) 
            centerfr = (int(xfr),int(yfr)) 
            cv.circle(frame,centerfr,2,(255,0,0),2)
Example #40
while(True): 
      
    # Capture the video frame 
    # by frame 
    ret, frame = vid.read() 


    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define range of blue color in HSV
    lower_blue = np.array([110,50,50])
    upper_blue = np.array([130,255,255])
    # Threshold the HSV image to get only blue colors
    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame,frame, mask= mask)

    # Display the resulting frame 
    cv2.imshow('frame',frame)
    cv2.imshow('mask',mask)
    cv2.imshow('res',res)      
    # the 'q' button is set as the 
    # quitting button you may use any 
    # desired button of your choice 
    if cv2.waitKey(1) & 0xFF == ord('q'): 
        break
  
# After the loop release the cap object 
vid.release() 
# Destroy all the windows 
cv2.destroyAllWindows() 
Example #41
def light_recog(frame, direct, traffic_lights):
    traffic_light = traffic_lights[0]

    # find out which traffic light to follow, if there are several
    if len(traffic_lights) > 1:
        # if we need to go to the right
        if direct == Direct.RIGHT or direct == Direct.SLIGHTLY_RIGHT:
            for tl in traffic_lights:
                if tl['topleft']['x'] > traffic_light['topleft']['x']:
                    traffic_light = tl
        # straight or left
        else:
            for tl in traffic_lights:
                if tl['topleft']['x'] < traffic_light['topleft']['x']:
                    traffic_light = tl

    # coordinates of the traffic light
    top_left = (traffic_light['topleft']['x'], traffic_light['topleft']['y'])
    bottom_right = (traffic_light['bottomright']['x'],
                    traffic_light['bottomright']['y'])
    # crop the frame to the traffic light
    roi = frame[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
    hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    color_detected = ''

    # possible color ranges for traffic lights
    red_lower = np.array([136, 87, 111], dtype=np.uint8)
    red_upper = np.array([180, 255, 255], dtype=np.uint8)

    yellow_lower = np.array([22, 60, 200], dtype=np.uint8)
    yellow_upper = np.array([60, 255, 255], dtype=np.uint8)

    green_lower = np.array([50, 100, 100], dtype=np.uint8)
    green_upper = np.array([70, 255, 255], dtype=np.uint8)

    # find what color the traffic light is showing
    red = cv2.inRange(hsv, red_lower, red_upper)
    yellow = cv2.inRange(hsv, yellow_lower, yellow_upper)
    green = cv2.inRange(hsv, green_lower, green_upper)

    kernel = np.ones((5, 5), np.uint8)

    red = cv2.dilate(red, kernel)
    res = cv2.bitwise_and(roi, roi, mask=red)
    green = cv2.dilate(green, kernel)
    res2 = cv2.bitwise_and(roi, roi, mask=green)

    (_, contours, hierarchy) = cv2.findContours(red, cv2.RETR_TREE,
                                                cv2.CHAIN_APPROX_SIMPLE)
    if contours:
        color_detected = "Red"

    (_, contours, hierarchy) = cv2.findContours(yellow, cv2.RETR_TREE,
                                                cv2.CHAIN_APPROX_SIMPLE)
    if contours:
        color_detected = "Yellow"

    (_, contours, hierarchy) = cv2.findContours(green, cv2.RETR_TREE,
                                                cv2.CHAIN_APPROX_SIMPLE)
    if contours:
        color_detected = "Green"

    if (0 <= top_left[1]
            and bottom_right[1] <= 437) and (244 <= top_left[0]
                                             and bottom_right[0] <= 630):
        frame = cv2.putText(frame, color_detected, bottom_right,
                            cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2)

    frame = cv2.putText(frame, color_detected, bottom_right,
                        cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)

    return frame, color_detected
Example #42
    global rgbl, rgbh, image, image_h
    ret, image = frame.read(0)
    cv2.imshow("window", image)
    cv2.setMouseCallback('window', mouse_click)
    if (para == 1):
        cv2.imshow("window1", cv2.medianBlur(image, 11))
    elif (para == 2):
        cv2.imshow("window1", cv2.GaussianBlur(image, (5, 5), 0))

    elif (para == 3):
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        cv2.imshow("window1", gray)
    elif (para == 4):
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        image1 = cv2.inRange(hsv, rgbl, rgbh)
        image2 = cv2.bitwise_and(image, image, mask=image1)
        cv2.imshow("window1", image2)

    elif (para == 5):
        b, g, r = cv2.split(image)
        image = cv2.merge((r, g, b))
        cv2.imshow("window1", image)
    elif (para == 6):
        b, g, r = cv2.split(image)
        image = cv2.merge((b, g, r))
        cv2.imshow("window1", image)
    elif (para == 0):
        cv2.imshow("window1", cv2.bilateralFilter(image, 31, 350, 350))
    k = cv2.waitKey(1)
    if k == ord('s'):
        para = 5
Example #43
#finds the center of a contour
#takes a single contour
#returns (x,y) position of the contour
def contour_center(c):
    M = cv2.moments(c)
    try: center = int(M['m10']/M['m00']), int(M['m01']/M['m00'])
    except ZeroDivisionError: center = 0, 0
    return center

#takes image and range
#returns parts of image in range
def only_color(img, color_range):
    # tuple parameters in the def are Python 2 syntax; unpack explicitly instead
    (h, s, v, h1, s1, v1) = color_range
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    lower, upper = np.array([h,s,v]), np.array([h1,s1,v1])
    mask = cv2.inRange(hsv, lower, upper)
    res = cv2.bitwise_and(img, img, mask=mask)
    kernel = np.ones((3,3), np.uint8)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    return res, mask

#returns the region of interest around the largest countour
#the accounts for objects not being centred in the frame
def bbox(img, c):
    x,y,w,h = cv2.boundingRect(c)
    return img[y-pad:y+h+pad, x-pad:w+x+pad], (x,y)

#import model
from keras.models import load_model
model = load_model('/ITMAL/MAL_GRP07/Project/shapes/shapes_model.h5')
dimData = np.prod([img_size, img_size])
Example #44
det = cv2.SimpleBlobDetector_create(params)

# define blue
lower_blue = np.array([80, 60, 20])
upper_blue = np.array([130, 255, 255])

while True:
    ret, frame = cap.read()

    imgHSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    blueMask = cv2.inRange(imgHSV, lower_blue, upper_blue)
    blur = cv2.blur(blueMask, (10, 10))

    res = cv2.bitwise_and(frame, frame, mask=blueMask)

    # get and draw keypoints
    keypnts = det.detect(blur)

    cv2.drawKeypoints(frame, keypnts, frame, (0, 0, 255),
                      cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

    cv2.imshow('frame', frame)
    cv2.imshow('mask', blur)

    for k in keypnts:
        print(k.size)

    if cv2.waitKey(1) & 0xff == ord('q'):
        sys.exit()
Example #45
def img_dif(x, y, z):
    img1 = cv2.absdiff(x, y)
    img2 = cv2.absdiff(y, z)
    dif = cv2.bitwise_and(img1, img2)
    print(dif)
    return dif
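
A short usage sketch for img_dif, applying three-frame differencing to webcam frames; the device index is an assumption.

# Usage sketch (assumes a webcam at device index 0): pixels bright in the
# result changed in both consecutive frame differences.
cap = cv2.VideoCapture(0)
_, f1 = cap.read()
_, f2 = cap.read()
_, f3 = cap.read()
motion = img_dif(f1, f2, f3)
cv2.imshow("motion", motion)
cv2.waitKey(0)
cap.release()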
Example #46
mask2[48:51,26:70]=0'''

for subdir, dirs, files in os.walk(src):
    print('subdir is ', subdir)
    for file1 in files:
        print('file number is ', i)
        #for i in range(0,len(filters)):
        img = cv2.imread(src + file1, 0)
        #img.astype(np.float32)
        fimg = img - img.mean()
        path = prefix + 'CenteredWhaleData/'
        if not os.path.exists(path):
            os.makedirs(path)
        imgP = path + file1
        #fimg.astype(np.uint8)
        image_m1 = cv2.bitwise_and(fimg, fimg, mask=mask1)
        image_m2 = cv2.bitwise_and(fimg, fimg, mask=mask2)
        sum1 = image_m1.sum()
        #res=image_m1
        sum2 = image_m2.sum()
        #if sum1>sum2:
        #res=image_m1[39:60]
        #else:
        #res=image_m2[30:51]
        res = image_m1[30:60]
        #pl.subplot(2,2,i)
        #pl.imshow(fimg,cmap='gray')
        #pl.title(file1)
        #i=i+1
        #pl.subplot(2,2,i)
        #pl.imshow(res,cmap='gray')
Example #47
import cv2
import numpy as np

blank = np.zeros((400, 400), dtype='uint8')

rectangle = cv2.rectangle(blank.copy(), (30, 30), (370, 370), 255, -1)
circle = cv2.circle(blank.copy(), (200, 200), 200, 255, -1)

cv2.imshow("rectangle", rectangle)
cv2.imshow("circle", circle)

#intersecting regions
bitand = cv2.bitwise_and(rectangle, circle)
cv2.imshow("bitwise and", bitand)

#non intersecting regions and intersecting regions
bitor = cv2.bitwise_or(rectangle, circle)
cv2.imshow("bitwise or", bitor)

#non intersecting regions
bitxor = cv2.bitwise_xor(rectangle, circle)
cv2.imshow("bitwise xor", bitxor)

bitnot = cv2.bitwise_not(rectangle)
cv2.imshow("bitwise not", bitnot)

cv2.waitKey(0)
def find_squares(n):
    global hsv, r_mask, color_mask, white_mask, black_filter

    h, s, v = cv2.split(hsv)
    h_sqr = np.square(h)

    sz = height // n
    border = 1 * sz

    varmax_edges = 20  # important
    for y in range(border, height - border, sz):
        for x in range(border, width - border, sz):

            # rect_h = h[y:y + sz, x:x + sz]
            rect_h = h[y:y + sz, x:x + sz]
            rect_h_sqr = h_sqr[y:y + sz, x:x + sz]

            median_h = np.sum(rect_h) / sz / sz  # block mean of hue (despite the name)

            sqr_median_hf = median_h * median_h
            median_hf_sqr = np.sum(rect_h_sqr) / sz / sz
            var = median_hf_sqr - sqr_median_hf  # E[h^2] - E[h]^2

            sigma = np.sqrt(var)

            delta = delta_C

            if sigma < sigma_W:  # sigma runs higher for white, 10-100

                ex_rect = hsv[y - 1 * sz:y + 2 * sz,
                              x - 1 * sz:x + 2 * sz].copy()  # why copy?
                r_mask = cv2.inRange(
                    ex_rect, (0, 0, val_W_min),
                    (255, sat_W_max, 255))  # saturation high 30, value low 180
                r_mask = cv2.bitwise_or(
                    r_mask, white_filter[y - 1 * sz:y + 2 * sz,
                                         x - 1 * sz:x + 2 * sz])
                white_filter[y - 1 * sz:y + 2 * sz,
                             x - 1 * sz:x + 2 * sz] = r_mask

            if sigma < sigma_C:  # remaining true colors, 1-3
                ex_rect = h[y - 1 * sz:y + 2 * sz,
                            x - 1 * sz:x + 2 * sz].copy()  # why copy?
                if median_h + delta >= 180:
                    r_mask = cv2.inRange(ex_rect, 0, median_h + delta - 180)
                    r_mask = cv2.bitwise_or(
                        r_mask, cv2.inRange(ex_rect, median_h - delta, 180))
                elif median_h - delta < 0:
                    r_mask = cv2.inRange(ex_rect, median_h - delta + 180, 180)
                    r_mask = cv2.bitwise_or(
                        r_mask, cv2.inRange(ex_rect, 0, median_h + delta))
                else:
                    r_mask = cv2.inRange(ex_rect, median_h - delta,
                                         median_h + delta)

                r_mask = cv2.bitwise_or(
                    r_mask, color_filter[y - 1 * sz:y + 2 * sz,
                                         x - 1 * sz:x + 2 * sz])
                color_filter[y - 1 * sz:y + 2 * sz,
                             x - 1 * sz:x + 2 * sz] = r_mask

    black_filter = cv2.inRange(
        bgrcap, (0, 0, 0), (rgb_L, rgb_L, rgb_L))  # important parameter, 30-50
    black_filter = cv2.bitwise_not(black_filter)

    color_filter = cv2.bitwise_and(color_filter, black_filter)
    color_filter = cv2.blur(color_filter, (20, 20))
    color_filter = cv2.inRange(color_filter, 240, 255)

    white_filter = cv2.bitwise_and(white_filter, black_filter)
    white_filter = cv2.blur(white_filter, (20, 20))
    white_filter = cv2.inRange(white_filter, 240, 255)

    itr = iter([color_filter, white_filter])

    for j in itr:
        im2, contours, hierarchy = cv2.findContours(j, cv2.RETR_EXTERNAL,
                                                    cv2.CHAIN_APPROX_SIMPLE)
        for n in range(len(contours)):
            approx = cv2.approxPolyDP(contours[n], sz // 2, True)
            if approx.shape[0] != 4:
                continue
            corners = approx[:, 0]

            edges = np.array([
                cv2.norm(corners[0] - corners[1], cv2.NORM_L2),
                cv2.norm(corners[1] - corners[2], cv2.NORM_L2),
                cv2.norm(corners[2] - corners[3], cv2.NORM_L2),
                cv2.norm(corners[3] - corners[0], cv2.NORM_L2)
            ])
            edges_mean_sq = (np.sum(edges) / 4)**2
            edges_sq_mean = np.sum(np.square(edges)) / 4
            if edges_sq_mean - edges_mean_sq > varmax_edges:
                continue
            # cv2.drawContours(bgrcap, [approx], -1, (0, 0, 255), 8)
            middle = np.sum(corners, axis=0) / 4
            cent.append(np.asarray(middle))
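The three-way branch above handles the circular hue axis: a band of width 2 * delta around median_h can spill past 0 or 180 and must then be collected as two ranges. The same idea as a standalone helper, a sketch assuming OpenCV's 0-180 hue convention:

def hue_band_mask(h_img, center, delta):
    # mask of pixels whose hue lies within +/- delta of center on the
    # circular 0..180 hue axis used by OpenCV
    lo, hi = center - delta, center + delta
    if hi >= 180:  # band wraps past 180
        return cv2.bitwise_or(cv2.inRange(h_img, 0, hi - 180),
                              cv2.inRange(h_img, lo, 180))
    if lo < 0:     # band wraps below 0
        return cv2.bitwise_or(cv2.inRange(h_img, lo + 180, 180),
                              cv2.inRange(h_img, 0, hi))
    return cv2.inRange(h_img, lo, hi)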
Example #49
def CannyThreshold(lowThreshold):  
    #detected_edges = cv2.GaussianBlur(gray,(3,3),0)  
    detected_edges = gray
    detected_edges = cv2.Canny(detected_edges,lowThreshold,lowThreshold*ratio,apertureSize = kernel_size)  
    dst = cv2.bitwise_and(img,img,mask = detected_edges)  # add colour to the edges from the original image
    cv2.imshow('canny demo',dst)  
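A hedged driver for the callback above, in the style of the classic OpenCV trackbar demo; img, gray, ratio, and kernel_size are assumed globals matching the names the function uses:

import cv2

img = cv2.imread('input.png')  # assumed source image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ratio = 3        # common high:low Canny threshold ratio
kernel_size = 3

cv2.namedWindow('canny demo')
# re-runs CannyThreshold whenever the slider moves
cv2.createTrackbar('Min threshold', 'canny demo', 0, 100, CannyThreshold)
CannyThreshold(0)  # initial render
cv2.waitKey(0)
cv2.destroyAllWindows()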
Example #50
def hough_circle_detection(X_img_file_paths, mode):
    """Circle Detection and Resizing Method"""
    #accepts a file path
    dirname = ""
    if mode == 0:
        dirname = "TrainingFinal"
    elif mode == 1:
        dirname = "Testing"

    X_data = list()
    global REJECT

    for filename in os.listdir(X_img_file_paths):
        if "jpg" in filename or "jpeg" in filename:
            print(filename)

            img = Image.open(X_img_file_paths + "/" + filename)
            gray = cv2.cvtColor(
                np.array(img).astype(np.uint8), cv2.COLOR_BGR2GRAY)
            if gray.shape[1] > 800:
                new_width = 800
                ratio = gray.shape[1] / 800
                new_height = gray.shape[0] / ratio
                gray = scipy.misc.imresize(gray,
                                           (int(new_height), int(new_width)))

            gray_gauss = cv2.GaussianBlur(gray, (5, 5), 0)
            gray_smooth = cv2.addWeighted(gray_gauss, 1.5, gray, -0.5, 0)
            circles = cv2.HoughCircles(gray_smooth,
                                       cv2.HOUGH_GRADIENT,
                                       1,
                                       200,
                                       param1=30,
                                       param2=15,
                                       minRadius=int(gray.shape[1] / 8.5),
                                       maxRadius=int(gray.shape[1] / 7))

            if circles is None:
                print('problem')
                badcanny_filename_array.append(filename)
                break
                #circles = cv2.HoughCircles(median_blur, cv2.HOUGH_GRADIENT, 1, max(gray.shape[0], gray.shape[1]) * 2, param1=50, param2=30, minRadius=int(gray.shape[1]/8),maxRadius=int(gray.shape[1]/6))

            else:

                center_x = circles[0][0][0]
                center_y = circles[0][0][1]
                radius = circles[0][0][2]
                rectX = int(center_x) - int(radius)
                rectY = int(center_y) - int(radius)

                crop_img = gray[rectY:(rectY + 2 * int(radius)),
                                rectX:(rectX + 2 * int(radius))]

                if center_x > gray.shape[1] / 2.5 and center_x < gray.shape[
                        1] / 1.5:
                    print('Found it!')

                    #GETTING THE NORMAL IMAGE
                    crop_img_final = scipy.misc.imresize(
                        crop_img, (IMAGE_SIZE, IMAGE_SIZE))
                    crop_img_final = cv2.fastNlMeansDenoising(crop_img_final)
                    cv2.imwrite(
                        os.path.join(dirname,
                                     str(filename) + dirname + '.png'),
                        crop_img_final)
                    X_data.append(crop_img_final)

                    #GETTING THE ENHANCED VERSION
                    image_enhanced = cv2.equalizeHist(crop_img_final)
                    cv2.imwrite(
                        os.path.join(
                            dirname,
                            str(filename) + dirname + 'enhanced' + '.png'),
                        image_enhanced)
                    X_data.append(image_enhanced)

                    #GETTING THE THRESHOLDED VERSION
                    image_thresholded = cv2.adaptiveThreshold(
                        crop_img_final, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                        cv2.THRESH_BINARY_INV, 11, 2)
                    cv2.imwrite(
                        os.path.join(
                            dirname,
                            str(filename) + dirname + 'thresholded' + '.png'),
                        image_thresholded)
                    X_data.append(image_thresholded)

                    #REFERENCE MASKING THE THRESHOLDED IMAGE (FOR INTEREST ZONES)
                    interest_zones_mask = np.zeros((IMAGE_SIZE, IMAGE_SIZE),
                                                   np.uint8)
                    cv2.circle(interest_zones_mask,
                               (int(IMAGE_SIZE / 2), int(IMAGE_SIZE / 4)),
                               int(IMAGE_SIZE / 9),
                               255,
                               thickness=-1)
                    cv2.circle(interest_zones_mask,
                               (int(IMAGE_SIZE / 2), 3 * int(IMAGE_SIZE / 4)),
                               int(IMAGE_SIZE / 9),
                               255,
                               thickness=-1)
                    cv2.circle(interest_zones_mask,
                               (int(IMAGE_SIZE / 4), int(IMAGE_SIZE / 2)),
                               int(IMAGE_SIZE / 9),
                               255,
                               thickness=-1)

                    interest_zones_img = cv2.bitwise_and(
                        interest_zones_mask, image_thresholded)
                    cv2.imwrite(
                        os.path.join(
                            dirname,
                            str(filename) + dirname + 'MASKEDINTEREST' +
                            '.png'), interest_zones_img)

                    #REFERENCE MASKING THE THRESHOLDED IMAGE
                    #FOR UNINTERESTED ZONES TO DETECT SHADOWS AND ANOMALITIES
                    irregularity_mask = np.zeros((IMAGE_SIZE, IMAGE_SIZE),
                                                 np.uint8)
                    irregularity_mask = irregularity_mask + 255  #turn from black to white

                    cv2.circle(irregularity_mask, (0, 0),
                               int(IMAGE_SIZE / 4),
                               0,
                               thickness=-1)  #color top left corner
                    cv2.circle(irregularity_mask, (0, IMAGE_SIZE),
                               int(IMAGE_SIZE / 4),
                               0,
                               thickness=-1)  #color bottom left corner
                    cv2.circle(irregularity_mask, (IMAGE_SIZE, 0),
                               int(IMAGE_SIZE / 4),
                               0,
                               thickness=-1)  #color top right corner
                    cv2.circle(irregularity_mask, (IMAGE_SIZE, IMAGE_SIZE),
                               int(IMAGE_SIZE / 4),
                               0,
                               thickness=-1)  #color bottom right corner

                    cv2.circle(
                        irregularity_mask,
                        (int(IMAGE_SIZE / 2), int(IMAGE_SIZE / 2)),
                        int(IMAGE_SIZE / 2),
                        0,
                        thickness=5
                    )  #Marker for the surrounding circular cell membrane edges

                    cv2.circle(irregularity_mask,
                               (int(IMAGE_SIZE / 2), int(IMAGE_SIZE / 4)),
                               int(IMAGE_SIZE / 9),
                               0,
                               thickness=-1)  #Spot for Control Zone
                    cv2.circle(irregularity_mask,
                               (int(IMAGE_SIZE / 2), 3 * int(IMAGE_SIZE / 4)),
                               int(IMAGE_SIZE / 9),
                               0,
                               thickness=-1)  #Spot for HIV Zone
                    cv2.circle(irregularity_mask,
                               (int(IMAGE_SIZE / 4), int(IMAGE_SIZE / 2)),
                               int(IMAGE_SIZE / 9),
                               0,
                               thickness=-1)  #Spot for Syphilis Zone

                    irregularity_img = cv2.bitwise_and(irregularity_mask,
                                                       image_thresholded)

                    #LOOP THROUGH THE IRREGULARITY IMAGE TO COUNT THE # OF WHITE PIXELS REPRESENTING IRREGULARITIES.
                    total_pixels = irregularity_img.shape[
                        0] * irregularity_img.shape[1]
                    total_white_pixels = 0
                    for i in range(irregularity_img.shape[0]):
                        for j in range(irregularity_img.shape[1]):
                            pixel = irregularity_img[i][j]
                            if (pixel == 255):
                                total_white_pixels += 1

                    percentage = (total_white_pixels / total_pixels) * 100

                    text_file.write("PHOTO NAME: %s ;" % filename)
                    if (percentage > 5):
                        REJECT += 1
                        reject_array.append(filename)
                        text_file.write(
                            "Irregularity Percentage ABOVE LIMIT %s " %
                            str(percentage))
                    else:
                        text_file.write("Irregularity Percentage %s " %
                                        str(percentage))

                    text_file.write("\n\n")
                    cv2.imwrite(
                        os.path.join(
                            dirname,
                            str(filename) + dirname + 'MASKEDIRREGULARITY' +
                            "%" + str(percentage) + '.png'), irregularity_img)

                else:
                    c = scipy.misc.imresize(crop_img, (IMAGE_SIZE, IMAGE_SIZE))
                    Image.fromarray(c).show()
                    badcanny_filename_array.append(filename)
                    print("COULDN'T FIND!")

    return X_data
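The nested Python loop inside the function that counts white pixels can be replaced by a native call; for a 0/255 binary image like irregularity_img, cv2.countNonZero gives the identical count. A drop-in sketch:

# vectorized replacement for the white-pixel counting loop
total_pixels = irregularity_img.size
total_white_pixels = cv2.countNonZero(irregularity_img)  # == 255 in a binary image
percentage = (total_white_pixels / total_pixels) * 100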
Example #51
        lower = np.array([-30, 100, 50]) # red
        upper = np.array([30, 255, 255]) # red
#       lower = np.array([30, 100, 50]) # yellow
#       upper = np.array([50, 255, 255]) # yellow
#       lower = np.array([120, 100, 50]) # blue
#       upper = np.array([140, 255, 255]) # blue
#       lower = np.array([40, 100, 50]) # green
#       upper = np.array([60, 255, 255]) # green

        # binarize: pixels inside the specified range become 255, those outside become 0
        mask_image = cv2.inRange(hsv_image, lower, upper)
        mask_sum = mask_image.sum()
        print("total: %d" % mask_sum)

        # cut the BGR image out, using the binarized image as a mask
        processed_image = cv2.bitwise_and(bgr_image, bgr_image, mask=mask_image)

        # compute the centroid
        mom = cv2.moments(mask_image)
        cx, cy = 0, 0
        if "m00" in mom and "m10" in mom and "m01" in mom and mom["m00"] != 0:
            cx = int(mom["m10"]/mom["m00"])
            cy = int(mom["m01"]/mom["m00"])
        print(cx, cy)

        # draw a purple dot to mark the computed centroid
        color = (255, 0, 255)
        processed_image = cv2.circle(processed_image, (cx, cy), 3, color, -1)

        # convert the processed image to a ROS topic message and publish it
        image_msg = bot.bridge.cv2_to_imgmsg(processed_image, "bgr8")
Example #52
import cv2
import numpy as np

image = cv2.imread("images/beach.png")
cv2.imshow("Original", image)
cv2.waitKey(0)

(cX, cY) = (image.shape[1]//2, image.shape[0]//2)

# --- rectangular mask ---
mask_rect = np.zeros(image.shape[:2], dtype="uint8")
cv2.rectangle(mask_rect, (cX-75, cY-75), (cX+75, cY+75), 255, -1)
cv2.imshow("Rectangular Mask", mask_rect)
cv2.waitKey(0)
masked = cv2.bitwise_and(image, image, mask=mask_rect)
cv2.imshow("Rectangular Mask Applied to Image", masked)
cv2.waitKey(0)

# --- circular mask ---
mask_cir = np.zeros(image.shape[:2], dtype="uint8")
cv2.circle(mask_cir, (cX, cY), 100, 255, -1)
cv2.imshow("Circular Mask", mask_cir)
cv2.waitKey(0)
masked = cv2.bitwise_and(image, image, mask=mask_cir)
cv2.imshow("Circular Mask Applied to Image", masked)
cv2.waitKey(0)
Example #53
            #     if aspRatio > 0.95 and aspRatio < 1.05:
            #         objectType = "Square"
            #     else:
            #         objectType = "Rectangle"
            # else:
            #     objectType = "Else"
                # source image, top-left corner, bottom-right corner, color, thickness
            cv2.rectangle(imgContour, (x - 10, y - 10), (x + w + 30, y + h +10), (0, 255, 0), 2)

            # label
            # (image, text, position, font, scale, color, thickness)
            cv2.putText(imgContour, "badminton",
                        (x + (w // 2) - 10, y + (h // 2) - 10), cv2.FONT_HERSHEY_COMPLEX,
                        0.9, (0, 255, 0), 2)

imgMask = cv2.bitwise_and(img,img,mask=mask)        # AND img with mask to keep only the masked region
cv2.imshow("imgmask",imgMask)
imgblur1 = cv2.GaussianBlur(imgMask,(13,13),10,10)      # two passes of Gaussian blur for edge smoothing (check)
imgblur2 = cv2.GaussianBlur(imgblur1,(7,7),30,30)
kernel = np.ones((3,3),np.uint8)                        # dilation (check)
imgDilation = cv2.dilate(imgblur2, kernel, iterations=2)
imgCanny = cv2.Canny(imgDilation, 80, 120)              # find edges
getContours(imgCanny)                                   # draw boxes
cv2.imshow("canny",imgCanny)
cv2.imshow("Result",imgContour)

cv2.imwrite("resources/result1.jpg", imgContour)
cv2.imwrite("resources/canny1.jpg", imgCanny)
cv2.imwrite("resources/mask1.jpg", imgMask)
cv2.waitKey(0)
####################################################
Example #54
def main():

    global new_image

    rs_img = rs_process()
    rospy.init_node('hand_tracking', anonymous=True)
    rospy.loginfo("Hand Detection Start!")

    #Marker Publisher Initialize
    pub = rospy.Publisher('/hand_marker', Marker, queue_size=1)
    hand_mark = MarkerGenerator()
    hand_mark.type = Marker.SPHERE_LIST
    hand_mark.scale = [.07] * 3
    hand_mark.frame_id = "/camera_color_optical_frame"
    hand_mark.id = 0
    hand_mark.lifetime = 10000

    #hand detect args
    parser = argparse.ArgumentParser()
    parser.add_argument('-sth',
                        '--scorethreshold',
                        dest='score_thresh',
                        type=float,
                        default=0.5,
                        help='Score threshold for displaying bounding boxes')
    parser.add_argument('-fps',
                        '--fps',
                        dest='fps',
                        type=int,
                        default=1,
                        help='Show FPS on detection/display visualization')
    parser.add_argument('-src',
                        '--source',
                        dest='video_source',
                        default=0,
                        help='Device index of the camera.')
    parser.add_argument('-wd',
                        '--width',
                        dest='width',
                        type=int,
                        default=640,
                        help='Width of the frames in the video stream.')
    parser.add_argument('-ht',
                        '--height',
                        dest='height',
                        type=int,
                        default=360,
                        help='Height of the frames in the video stream.')
    parser.add_argument(
        '-ds',
        '--display',
        dest='display',
        type=int,
        default=0,
        help='Display the detected images using OpenCV. This reduces FPS')
    parser.add_argument('-num-w',
                        '--num-workers',
                        dest='num_workers',
                        type=int,
                        default=4,
                        help='Number of workers.')
    parser.add_argument('-q-size',
                        '--queue-size',
                        dest='queue_size',
                        type=int,
                        default=5,
                        help='Size of the queue.')
    args = parser.parse_args()
    num_hands_detect = 2

    im_width, im_height = (args.width, args.height)

    #time for fps calculation
    start_time = datetime.datetime.now()
    num_frames = 0

    #skin filter color
    lower = np.array([0, 48, 80], dtype="uint8")
    upper = np.array([20, 255, 255], dtype="uint8")

    #######################################
    #Define the frame to transform
    #######################################
    target_frame = "/camera_color_optical_frame"  ######FROM
    reference_frame = "/base_link"  ####TO

    #####################################
    #Define the numpy array to record the consequences of the hand location
    ######################################
    # hand_pos = np.empty((1,3))

    is_transform_target = False

    if (is_transform_target):
        listener = tf.TransformListener()
        listener.waitForTransform(reference_frame, target_frame, rospy.Time(0),
                                  rospy.Duration(4.0))
        hand_mark.frame_id = reference_frame
    else:
        hand_mark.frame_id = target_frame

    hand_pose_list = []
    while not rospy.is_shutdown():
        #get rgb,depth frames for synchronized frames
        if not new_image:
            continue

        im_rgb = rs_image_rgb
        # im_rgb = cv2.cvtColor(rs_image_rgb, cv2.COLOR_BGR2RGB)
        im_depth = rs_image_depth
        new_image = False
        #add check

        # depth_map = np.array(rs_image_depth, dtype=np.uint8)
        depth_map = cv2.applyColorMap(
            cv2.convertScaleAbs(rs_image_depth, alpha=0.03), cv2.COLORMAP_JET)
        # cv2.imshow("Depth Image", depth_map)
        cv2.imshow("rs_image_rgb", rs_image_rgb)

        try:
            image_np = im_rgb
        except:
            print("Error converting to RGB")

        # actual hand detection
        boxes, scores = detector_utils.detect_objects(image_np,
                                                      detection_graph, sess)
        # draw bounding boxes
        detector_utils.draw_box_on_image(num_hands_detect, args.score_thresh,
                                         scores, boxes, im_width, im_height,
                                         image_np)

        if (scores[0] > args.score_thresh):
            (left, right, top,
             bottom) = (boxes[0][1] * im_width, boxes[0][3] * im_width,
                        boxes[0][0] * im_height, boxes[0][2] * im_height)
            p1 = (int(left), int(top))
            p2 = (int(right), int(bottom))
            # print p1,p2,int(left),int(top),int(right),int(bottom)
            image_hand = image_np[int(top):int(bottom), int(left):int(right)]
            # cv2.namedWindow("hand", cv2.WINDOW_NORMAL)
            cv2.imshow('hand', cv2.cvtColor(image_hand, cv2.COLOR_RGB2BGR))

            align_hand = im_rgb[int(top):int(bottom), int(left):int(right)]
            align_depth = depth_map[int(top):int(bottom), int(left):int(right)]
            align_hand_detect = np.hstack((align_hand, align_depth))
            # cv2.namedWindow('align hand', cv2.WINDOW_AUTOSIZE)
            # cv2.imshow('align hand', align_hand_detect)

            # cv2.imshow('align_hand_bgr', align_hand)
            align_hand = cv2.cvtColor(align_hand, cv2.COLOR_BGR2RGB)
            #skin filtering
            converted = cv2.cvtColor(align_hand, cv2.COLOR_BGR2HSV)
            skinMask = cv2.inRange(converted, lower, upper)
            # cv2.imshow("skinMask", skinMask)

            # apply a series of erosions and dilations to the mask
            # using an elliptical kernel
            # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
            # skinMask = cv2.erode(skinMask, kernel, iterations = 2)
            # skinMask = cv2.dilate(skinMask, kernel, iterations = 2)

            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
            skinMask = cv2.erode(skinMask, kernel, iterations=3)
            skinMask = cv2.dilate(skinMask, kernel, iterations=3)

            # blur the mask to help remove noise, then apply the
            # mask to the frame
            skinMask = cv2.GaussianBlur(skinMask, (3, 3), 0)
            skin = cv2.bitwise_and(align_hand, align_hand, mask=skinMask)
            # show the skin in the image along with the mask
            # cv2.imshow("images", np.hstack([align_hand, skin]))
            #end skin

            depth_pixel = [(int(left) + int(right)) / 2,
                           (int(top) + int(bottom)) / 2]
            # depth_point = [0.0,0.0,0.0]
            depth_point = rs.rs2_deproject_pixel_to_point(
                depth_intrin, depth_pixel,
                im_depth[depth_pixel[1], depth_pixel[0]] * depth_scale)
            print(depth_point)
            hand_mark.counter = 0
            t = rospy.get_time()
            hand_mark.color = [0, 1, 0, 1]

            # hand_mark.id = hand_mark.id + 1
            # if (hand_mark.id > 100000) :
            #     hand_mark.id = 0
            # ## hand in /camera_color_optical_frame
            # print ('id ',hand_mark.id)
            m0 = hand_mark.marker(points=[(depth_point[0], depth_point[1],
                                           depth_point[2])])

            hand_point_x = depth_point[0]
            hand_point_y = depth_point[1]
            hand_point_z = depth_point[2]

            if (is_transform_target):
                #########################################################################
                ##convert /camera_color_optical_frame => /world
                #########################################################################

                #transform position from target_frame to reference frame
                target_ref_camera = PointStamped()
                target_ref_camera.header.frame_id = target_frame
                target_ref_camera.header.stamp = rospy.Time(0)
                target_ref_camera.point = m0.points[0]

                p = listener.transformPoint(reference_frame, target_ref_camera)
                # p=listener.transformPoint(reference_frame,hand_mark)

                m = hand_mark.marker(points=[(p.point.x, p.point.y,
                                              p.point.z)])

                #substitute data for the variable
                hand_point_x = p.point.x
                hand_point_y = p.point.y
                hand_point_z = p.point.z

                # pub.publish(m)

                #offset z-axiz
                # hand_mark.id = 1
                # m = hand_mark.marker(points= [(p.point.x, p.point.y, p.point.z + 0.10)])
                # pub.publish(m)
            else:
                # print('published!')
                ####append the data

                if 0.15 <= hand_point_z <= 0.75 and -0.4 <= hand_point_x <= 0.4:
                    print("recorded hand point")
                    hand_pose = [
                        hand_point_x, hand_point_y, hand_point_z, 0.0, 0.0,
                        0.0, 1.0
                    ]
                    print(hand_pose)
                    hand_pose_list.append(hand_pose)

                pub.publish(m0)

                #substitute data for the variable
                hand_point_x = depth_point[0]
                hand_point_y = depth_point[1] - 0.08
                hand_point_z = depth_point[2]

                #### offset z-axiz
                # hand_mark.id = 1
                m1 = hand_mark.marker(points=[(depth_point[0],
                                               depth_point[1] - 0.08,
                                               depth_point[2])])
                # m = hand_mark.marker(points= [(p.point.x, p.point.y, p.point.z + 0.10)])
                # pub.publish(m1)

        # Calculate Frames per second (FPS)
        num_frames += 1
        elapsed_time = (datetime.datetime.now() - start_time).total_seconds()
        fps = num_frames / elapsed_time

        #display window
        if (args.display > 0):
            # Display FPS on frame
            if (args.fps > 0):
                detector_utils.draw_fps_on_image("FPS : " + str(float(fps)),
                                                 image_np)

            cv2.imshow('Single Threaded Detection',
                       cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR))
        else:
            print("frames processed: ", num_frames, "elapsed time: ",
                  elapsed_time, "fps: ", str(int(fps)))

        if cv2.waitKey(10) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break

    print('save hand_pub.npy')
    # np.save('./hand_pub.npy',hand_pos)
    np.save("hand_pub", hand_pose_list)
Example #55
camera = cv2.VideoCapture(0)
while True:
        ret, frame = camera.read()

        filtre = cv2.GaussianBlur(frame, (5, 5), 0)
        hsv = cv2.cvtColor(filtre, cv2.COLOR_BGR2HSV)

        kernel = np.ones((5, 5), "uint8")


        low_red = np.array([136, 87, 111], np.uint8)
        high_red = np.array([180, 255, 243], np.uint8)
        red_mask = cv2.inRange(hsv, low_red, high_red)
        red = cv2.dilate(red_mask, kernel)
        red1 = cv2.bitwise_and(frame, frame, mask=red_mask)

        contours, ret = cv2.findContours(red, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        for pic, contour in enumerate(contours):
            area = cv2.contourArea(contour)
            if area > 200:
                x, y, w, h = cv2.boundingRect(contour)
                frame = cv2.drawContours(frame, contours, -1, [0, 255, 0], 3)
                cv2.putText(frame, "Red Color", (x - 3, y - 3), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255))


        low_blue = np.array([94, 80, 2], np.uint8)
        high_blue = np.array([126, 255, 255], np.uint8)
        blue_mask = cv2.inRange(hsv, low_blue, high_blue)
        blue = cv2.erode(blue_mask, kernel)
        blue1 = cv2.bitwise_and(frame, frame, mask=blue_mask)
Example #56
    # blue range
    lower_blue = np.array([110, 50, 50])
    upper_blue = np.array([130, 255, 255])

    # Red range
    lower_red = np.array([0, 31, 255])
    upper_red = np.array([176, 255, 255])

    # white range
    lower_white = np.array([0, 0, 0])
    upper_white = np.array([0, 0, 255])

    # Define a mask ranging from lower to uppper
    mask = cv2.inRange(hsv, lower_green, upper_green)
    # Do masking
    res = cv2.bitwise_and(image, image, mask=mask)
    # convert to hsv to gray
    res_bgr = cv2.cvtColor(res, cv2.COLOR_HSV2BGR)
    res_gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)

    # Defining a kernel to do morphological operation in threshold image to
    # get better output.
    kernel = np.ones((13, 13), np.uint8)
    thresh = cv2.threshold(res_gray, 127, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)

    # find contours in threshold image
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    prev = 0
    font = cv2.FONT_HERSHEY_SIMPLEX
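In the fragment above, the cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU combination makes OpenCV choose the threshold itself (the 127 that was passed is ignored). A standalone sketch of the same call:

import cv2

gray = cv2.imread('sample.png', cv2.IMREAD_GRAYSCALE)  # assumed input
# with THRESH_OTSU the passed threshold is ignored; the value Otsu's
# method picked is returned as the first element
otsu_val, binary = cv2.threshold(gray, 0, 255,
                                 cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
print('Otsu threshold:', otsu_val)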
Example #57
def main():
    start = int(round(time.time() * 1000))

    cam = cv2.VideoCapture(1, cv2.CAP_DSHOW)
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    retval, frame = cam.read()
    if not retval:
        # replace this with a popup window
        raise ValueError("Can't read frame. Check if webcam is connected!")
    cv2.imwrite('img2.jpg', frame)
    # cv2.imshow("img1", frame)
    img = cv2.imread('img2.jpg')
    img_hsv = cv2.imread('img2.jpg')

    # img = cv2.imread('queentest.jpg')
    # img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # detect top left corner
    hsvY = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    lower_rangeY = np.array([30, 60, 195])
    upper_rangeY = np.array([40, 255, 255])
    maskY = cv2.inRange(hsvY, lower_rangeY, upper_rangeY)
    pointsY = cv2.findNonZero(maskY)
    avgY = np.mean(pointsY, axis=0)
    # print(avgY)
    # print(avgY[0])
    yellowX = int(avgY[0][0])
    yellowY = int(avgY[0][1])
    # print(yellowX)
    # print(yellowY),

    # detect bottom right corner
    hsvP = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    lower_rangeP = np.array([150, 100, 175])
    upper_rangeP = np.array([165, 255, 255])
    maskP = cv2.inRange(hsvP, lower_rangeP, upper_rangeP)
    pointsP = cv2.findNonZero(maskP)
    avgP = np.mean(pointsP, axis=0)
    # print(avgP)
    # print(avgP[0])
    pinkX = int(avgP[0][0])
    pinkY = int(avgP[0][1])
    # print(pinkX)
    # print(pinkY)
    ##replace r by detected colour points
    ####r = cv2.selectROI(img)
    ####print(r[0])
    ####print(r[1])
    ####print(r[2])
    ####print(r[3])
    ####print(int(r[1] + r[3]))
    ####print(int(r[0] + r[2]))
    ##r0=x1 r1=y1 r2=xadd r3=yadd r0+r2=x2 r1+r3=y2
    # crop to area of interest, in this case board using detected cornerpoint
    img = img[int(yellowY):int(pinkY), int(yellowX):int(pinkX)]
    img_hsv = img_hsv[int(yellowY):int(pinkY), int(yellowX):int(pinkX)]

    # ranges for b&w
    hsv = cv2.cvtColor(img_hsv, cv2.COLOR_BGR2HSV)
    white_lower_range = np.array([0, 0, 200])
    white_upper_range = np.array([179, 75, 255])
    black_lower_range = np.array([0, 0, 0])
    black_upper_range = np.array([179, 255, 50])
    # circle detection to know where to look for black and white
    img = cv2.medianBlur(img, 5)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    circles = cv2.HoughCircles(gray,
                               cv2.HOUGH_GRADIENT,
                               1,
                               20,
                               param1=50,
                               param2=35,
                               minRadius=25,
                               maxRadius=50)

    # detect red for queen finding
    lower_range1R = np.array([0, 75, 100])
    upper_range1R = np.array([15, 225, 255])
    lower_range2R = np.array([130, 75, 100])
    upper_range2R = np.array([179, 225, 255])
    mask1R = cv2.inRange(hsv, lower_range1R, upper_range1R)
    mask2R = cv2.inRange(hsv, lower_range2R, upper_range2R)
    maskR = mask1R | mask2R

    circles = np.uint16(np.around(circles))
    circle_color = list()
    # for circles in list draw and check if b or w
    for i in circles[0, :]:
        # print("next piece")
        # draw the outer circle
        cv2.circle(img, (i[0], i[1]), i[2], (0, 255, 0), 2)
        # draw the center of the circle
        cv2.circle(img, (i[0], i[1]), 2, (0, 0, 255), 3)
        h, w = gray.shape
        mask = np.zeros((h, w), np.uint8)
        cv2.circle(mask, (i[0], i[1]), i[2], (255, 255, 255), thickness=-1)
        masked_data = cv2.bitwise_and(img, img, mask=mask)
        _, thresh = cv2.threshold(mask, 1, 255, cv2.THRESH_BINARY)
        contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
        x, y, w, h = cv2.boundingRect(contours[0])
        # print(x)
        # print(y)
        # print(w)
        # print(h)
        crop = masked_data[y:y + h, x:x + w]
        # cv2.imshow('crop', crop)
        # print(i[0])
        # print(i[1])
        # print(i[2])
        # mean, _ = cv2.meanStdDev(crop)
        queenCntr = 0
        blackCntr = 0
        whiteCntr = 0
        # print(img[i[1],i[0]])
        for row in range(y, y + h):
            for col in range(x, x + w):
                if (math.sqrt((i[0] - col)**2 + (i[1] - row)**2) <= i[2]):
                    # print("i got here")
                    blue = img[row, col, 0]
                    green = img[row, col, 1]
                    red = img[row, col, 2]
                    # print(blue)
                    # print(green)
                    # print(red)
                    mean = int((int(blue) + int(green) + int(red)) / 3)
                    # print(mean)
                    if (maskR[row, col] > 0):
                        queenCntr = queenCntr + 1
                    # print("queencntr++")
                    if (mean < 127.5):
                        # print("blackCntr++")
                        blackCntr = blackCntr + 1
                    # print(blackCntr)
                    else:
                        # print("whitecntr++")
                        whiteCntr = whiteCntr + 1
                    # print(whiteCntr)
        # print(blackCntr)
        # print(whiteCntr)
        # print(queenCntr)
        if (queenCntr > 200):
            if (blackCntr > whiteCntr):
                # print('black')
                circle_color.append((i[0], i[1], 1, 1))
            else:
                # print('white')
                circle_color.append((i[0], i[1], 2, 1))
        else:
            if (blackCntr > whiteCntr):
                # print('black')
                circle_color.append((i[0], i[1], 1, 0))
            else:
                # print('white')
                circle_color.append((i[0], i[1], 2, 0))
    # print output
    # print(circle_color)
    # convert pixel coords to board coords
    h, w = gray.shape
    square_w = w / 8
    square_h = h / 8
    # print(w)
    # print(h)
    circle_position = list()
    for c in circle_color:
        x_pos = 0
        y_pos = 0
        for x in range(1, 9):
            if ((c[0] < x * square_w) & (c[0] > (x - 1) * square_w)):
                x_pos = x
        for y in range(1, 9):
            if ((c[1] < y * square_h) & (c[1] > (y - 1) * square_h)):
                y_pos = y
        circle_position.append((c[2], x_pos - 1, y_pos - 1, c[3]))
    # output final list with colours and board coordinates
    # output format = (colour, X, Y)
    # where colour=1 -> black, colour=2 -> white
    # X from left to right 0 -> 7
    # Y from top to bottom 0 -> 7
    print(circle_position)

    boardData = {'board': []}
    boardData['board'].append({'board': circle_position})
    with open('boardData.json', 'w') as boardState:
        json.dump(boardData, boardState)

    end = int(round(time.time() * 1000))
    duration = end - start
    print(duration)

    #cv2.imshow('detected circles', img)
    #cv2.imshow('detected red1', mask1R)
    #cv2.imshow('detected red2', mask2R)
    #cv2.imshow('detected red', maskR)
    #cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #58
def show_target(frame, frame_mask):
    dst = cv.GaussianBlur(frame_mask, (3, 3), 0)
    gray = cv.cvtColor(dst, cv.COLOR_BGR2GRAY)
    ret, binary = cv.threshold(gray, 250, 255, cv.THRESH_BINARY)  # generate a binary image
    target = cv.bitwise_and(frame, frame, mask=binary)
    cv.imshow("target______________", target)
Example #59
yprev = 10000

while True:
    ret, frame = cap.read()

    if ret == True:

        # setting a ROI
        mask = np.zeros(frame.shape, dtype=np.uint8)
        roi_corners = np.array([[(0, 720), (0, 460), (480, 230), (800, 230),
                                 (1280, 460), (1280, 720)]],
                               dtype=np.int32)
        channel_count = frame.shape[2]  # i.e. 3 or 4 depending on your img
        ignore_mask_color = (255, ) * channel_count
        cv2.fillPoly(mask, roi_corners, ignore_mask_color)
        frame = cv2.bitwise_and(frame, mask)

        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # background subtraction block
        #        diff = cv2.subtract(frame, back)
        diff = back2.apply(frame)
        _, diff = cv2.threshold(diff, 5, 255, cv2.THRESH_BINARY_INV)

        # doubtful part
        #kernel = np.ones((2,2),np.uint8)
        #diff = cv2.dilate(diff,kernel,iterations = 2)
        #diff = cv2.erode(diff,kernel,iterations = 2)

        cv2.imshow('diff', diff)
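This fragment presupposes a capture device cap and a background subtractor back2 created earlier; a minimal sketch of that setup, with MOG2 as a guess at the intended subtractor:

import cv2
import numpy as np

cap = cv2.VideoCapture('road.mp4')  # assumed video source
# back2.apply(frame) above implies a cv2 background subtractor;
# MOG2 is an assumption, not confirmed by the fragment
back2 = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=16)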
Example #60
def Game():

    # parameter settings
    bgModel = None
    cap_region_x_begin = 0.5  # start point / total width
    cap_region_y_end = 0.8  # start point / total height

    threshold = 60  # BINARY threshold (makes the silhouette distinct)

    blurValue = 41  # GaussianBlur parameter (smooths the picture)

    bgSubThreshold = 50

    learningRate = 0

    # variables

    isBgCaptured = 0  # bool, whether the background has been captured

    triggerSwitch = False  # if true, keyboard simulator works
    print("press 'b' to capture your background.")
    print("press 'n' to capture your gesture.")

    # Camera

    camera = cv2.VideoCapture(0)

    camera.set(10, 200)

    cv2.namedWindow('trackbar')

    cv2.createTrackbar('trh1', 'trackbar', threshold, 100, printThreshold)

    while camera.isOpened():  # capture and convert image

        ret, frame = camera.read()

        threshold = cv2.getTrackbarPos('trh1', 'trackbar')

        frame = cv2.bilateralFilter(frame, 5, 50, 100)  # smoothing filter

        frame = cv2.flip(frame, 1)  # flip the frame horizontally

        cv2.rectangle(frame, (int(cap_region_x_begin * frame.shape[1]), 0),
                      (frame.shape[1], int(cap_region_y_end * frame.shape[0])),
                      (255, 0, 0), 2)

        cv2.imshow('original', frame)

        #  Main operation

        if isBgCaptured == 1:  #  background  is captured

            fgmask = bgModel.apply(frame, learningRate=learningRate)

            # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

            # res = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)

            kernel = np.ones((3, 3), np.uint8)

            fgmask = cv2.erode(fgmask, kernel, iterations=1)

            img = cv2.bitwise_and(frame, frame, mask=fgmask)

            img = img[0:int(cap_region_y_end * frame.shape[0]),
                      int(cap_region_x_begin *
                          frame.shape[1]):frame.shape[1]]  # clip the ROI

            cv2.imshow('mask', img)

            # convert the image into binary image

            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            blur = cv2.GaussianBlur(gray, (blurValue, blurValue), 0)

            cv2.imshow('blur', blur)

            ret, thresh = cv2.threshold(blur, threshold, 255,
                                        cv2.THRESH_BINARY)

            cv2.imshow('ori', thresh)

            # get the coutours

            thresh1 = copy.deepcopy(thresh)

            contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)

            length = len(contours)

            maxArea = -1

            if length > 0:

                for i in range(
                        length
                ):  # find the biggest contour (according to area)

                    temp = contours[i]

                    area = cv2.contourArea(temp)

                    if area > maxArea:
                        maxArea = area

                        ci = i

                res = contours[ci]

                hull = cv2.convexHull(res)

                drawing = np.zeros(img.shape, np.uint8)

                cv2.drawContours(drawing, [res], 0, (0, 255, 0), 2)

                cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)

                hull = cv2.convexHull(
                    res, returnPoints=False
                )  # return the point index in the contour

                Flag = True
                if len(hull) > 3:

                    defects = cv2.convexityDefects(res,
                                                   hull)  # finding defects

                    if defects is not None:  # avoid crashing (bug not found)

                        cnt = 0

                        for i in range(
                                defects.shape[0]):  # calculate the angle

                            s, e, f, d = defects[i][0]

                            start = tuple(res[s][0])

                            end = tuple(res[e][0])

                            far = tuple(res[f][0])

                            a = math.sqrt((end[0] - start[0])**2 +
                                          (end[1] - start[1])**2)

                            b = math.sqrt((far[0] - start[0])**2 +
                                          (far[1] - start[1])**2)

                            c = math.sqrt((end[0] - far[0])**2 +
                                          (end[1] - far[1])**2)

                            angle = math.acos((b**2 + c**2 - a**2) /
                                              (2 * b * c))  # cosine theorem

                            if angle <= math.pi / 2:  # angle less than 90 degree, treat as fingers

                                cnt += 1

                                cv2.circle(drawing, far, 8, [211, 84, 0], -1)

                        isFinishCal, cnt, Flag = True, cnt, False
                if Flag:
                    isFinishCal, cnt = False, 0

                if triggerSwitch is True:

                    if isFinishCal is True and cnt <= 5:
                        #To determine what the player gesture represents
                        if cnt == 0:
                            print("stone")
                            camera.release()
                            cv2.destroyAllWindows()
                            break
                        elif cnt == 1:
                            print("scissors")
                            camera.release()
                            cv2.destroyAllWindows()
                            break
                        elif cnt == 4:
                            #Change the value of cnt for easy sorting later
                            cnt = 2
                            print("paper")
                            camera.release()
                            cv2.destroyAllWindows()
                            break

            cv2.imshow('output',
                       drawing)  # drawing the contour of one's gesture

        # Keyboard OP

        k = cv2.waitKey(10)

        if k == 27:  # press ESC to exit

            camera.release()

            cv2.destroyAllWindows()

            break

        elif k == ord('b'):  # press 'b' to capture the background

            bgModel = cv2.createBackgroundSubtractorMOG2(0, bgSubThreshold)

            isBgCaptured = 1

            print('!!!Background Captured!!!')

        elif k == ord('r'):  # press 'r' to reset the background

            bgModel = None

            triggerSwitch = False

            isBgCaptured = 0

            print('!!!Reset BackGround!!!')

        elif k == ord('n'):  # press 'n' to count the number

            triggerSwitch = True

            print('!!!Trigger On!!!')
    play = []
    play.append('rock')
    play.append('scissors')
    play.append('paper')
    p1 = cnt
    pc = random.randint(0, 2)
    # print p1,' ',pc,'\n'
    print("you are ", play[p1], ",and the computer is ", play[pc], "\n")
    #to judge the winner of the game.
    if (p1 == pc):
        print("Game Draw\n")
        game_result = 1
    elif ((p1 == 0 and pc == 1) or (p1 == 1 and pc == 2)
            or (p1 == 2 and pc == 0)):
        print('you win!\n')
        game_result = 1
    else:
        print('you lose!\n')
        game_result = -1
    return game_result
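A hedged usage sketch; by the convention above, Game() returns 1 for a draw or a win and -1 for a loss:

if __name__ == '__main__':
    result = Game()
    print('game result:', result)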