Example #1
def calibrateVal(handFolder):
    print(handFolder)
    fnames = os.listdir(handFolder)
    fnames.sort()
    fnames = [f for f in fnames if f.find('image_') >= 0]
    n = len(fnames) // 2  # one depth + one RGB file per frame
    i = 0
    while (i < n):
        depPath = os.path.join(handFolder, 'image_'+str(i)+'_dep.png')
        imgPath = os.path.join(handFolder, 'image_'+str(i)+'_rgb.png')

        image = cv2.imread(imgPath)
        depth = cv2.imread(depPath, -1)

        shp = (image.shape[0], image.shape[1])

        hue = np.zeros(shp, dtype='uint8')
        sat = np.zeros(shp, dtype='uint8')
        val = np.zeros(shp, dtype='uint8')

        hands = np.zeros(shp, dtype='uint8')

        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        hue, sat, val = cv2.split(hsv)

        print(cv2.mean(val)[0])
        i = i + 1
Example #2
 def UpdateModelFromMask(self, mask, img, hsv):
     self.avgRGB = cv.mean(img, mask)[0:3]
     self.avgHSV = cv.mean(hsv, mask)[0:3]
     distMap = cv.distanceTransform(1 - mask, cv.DIST_L2, 5)
     self.lineProbabilityMap = (1.0/(1.0 + 0.1*distMap))
     print(self.avgRGB)
     print(self.avgHSV)
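Example #2 builds an average-color model plus a distance-based probability map. A self-contained sketch of the same computation with the modern cv2 constants (function and variable names are mine; assumes a 0/255 uint8 mask):

import cv2
import numpy as np

def update_model_from_mask(mask, img, hsv):
    # mask: uint8, 255 inside the modeled region, 0 elsewhere
    avg_rgb = cv2.mean(img, mask)[:3]
    avg_hsv = cv2.mean(hsv, mask)[:3]
    # distance from every background pixel to the nearest mask pixel
    dist_map = cv2.distanceTransform(cv2.bitwise_not(mask), cv2.DIST_L2, 5)
    # probability decays smoothly with distance from the modeled region
    line_probability_map = 1.0 / (1.0 + 0.1 * dist_map)
    return avg_rgb, avg_hsv, line_probability_map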
Example #3
def on_event(event, x, y, flags, param):
    # runs when an event (such as a user click) happens on the user window

    # if it's not a left-mouse button click, ignore it
    if event != cv2.EVENT_LBUTTONDOWN:
        return

    global IMAGE, IMAGE_HSV, IMAGE_HLS, HEIGHT, WIDTH, REGION_SIDE

    # get the mean color in a region: BGR
    xmin = min(max(0, x - REGION_SIDE // 2), # floor ("integer") division
               WIDTH - REGION_SIDE)
    xmax = xmin + REGION_SIDE

    ymin = min(max(0, y - REGION_SIDE // 2),
               HEIGHT - REGION_SIDE)
    ymax = ymin + REGION_SIDE

    roi  = IMAGE[ymin:ymax, xmin:xmax]
    mean = cv2.mean(roi)

    # get the mean color: HSV
    roi_hsv  = IMAGE_HSV[ymin:ymax, xmin:xmax]
    mean_hsv = cv2.mean(roi_hsv)

    # get the mean color: HLS
    roi_hls  = IMAGE_HLS[ymin:ymax, xmin:xmax]
    mean_hls = cv2.mean(roi_hls)

    print('BGR: {:3d}, {:3d}, {:3d} | HSV: {:3d}, {:3d}, {:3d} | HLS: {:3d}, {:3d}, {:3d}'.
          format(int(round(mean[0])), int(round(mean[1])), int(round(mean[2])),
                 int(round(mean_hsv[0])), int(round(mean_hsv[1])), int(round(mean_hsv[2])),
                 int(round(mean_hls[0])), int(round(mean_hls[1])), int(round(mean_hls[2]))))
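A hedged usage sketch wiring on_event into a window; the globals follow the example above, and the file name is a placeholder:

import cv2

IMAGE = cv2.imread('sample.png')                      # hypothetical input file
IMAGE_HSV = cv2.cvtColor(IMAGE, cv2.COLOR_BGR2HSV)
IMAGE_HLS = cv2.cvtColor(IMAGE, cv2.COLOR_BGR2HLS)
HEIGHT, WIDTH = IMAGE.shape[:2]
REGION_SIDE = 16                                      # side of the sampled square, in pixels

cv2.namedWindow('image')
cv2.setMouseCallback('image', on_event)               # on_event as defined above
cv2.imshow('image', IMAGE)
cv2.waitKey(0)
cv2.destroyAllWindows()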
Example #4
def draw_mouth(mouth, character, x, y, width, height):
    fit_image = fit_character(mouth[0], width, height)
    fit_mask = fit_character(mouth[1], width, height)
    fit_mask2 = fit_character(mouth[2], width, height)
    fit_height, fit_width = fit_image.shape[0:2]
    y_offset = y + fit_height // 6
    y_offset = max(0, min(y_offset, character.shape[0] - fit_height))
    x_offset = x + (width - fit_width) // 2
    x_offset = max(0, min(x_offset, character.shape[1] - fit_width))
    y0, y1 = y_offset, (y_offset+fit_height)
    x0, x1 = x_offset, (x_offset+fit_width)
    fit_mask = numpy.float32(fit_mask) / 255.0
    fit_mask2 = numpy.float32(fit_mask2) / 255.0
    char_region = numpy.float32(character[y0:y1,x0:x1])
    inverse_fit_mask = fit_mask * -1 + 1.0
    mul = cv2.multiply(char_region, fit_mask)
    m1 = cv2.mean(mul)
    m2 = cv2.mean(fit_mask)
    avg = numpy.float32([m/(n * 255.0) if n else 0.0 for m, n in zip(m1, m2)])
    r = numpy.ones((fit_height, fit_width), numpy.float32) * avg[0]  # (rows, cols) order
    g = numpy.ones((fit_height, fit_width), numpy.float32) * avg[1]
    b = numpy.ones((fit_height, fit_width), numpy.float32) * avg[2]
    rgb = cv2.merge((r,g,b))
    rgb += (rgb * -1.0 + 0.8) * fit_mask2
    fit_image = cv2.multiply(numpy.float32(fit_image), rgb)
    fit_image = cv2.multiply(fit_image, inverse_fit_mask)
    character[y0:y1,x0:x1] = numpy.uint8(mul + fit_image)
Example #5
def tantriggs(image):
    # Convert to float
    image = np.float32(image)

    image = cv2.pow(image, GAMMA)
    image = difference_of_gaussian(image)

    # first normalization pass
    tmp = cv2.pow(np.abs(image), ALPHA)
    mean = cv2.mean(tmp)[0]
    image = image / (mean ** (1.0 / ALPHA))

    # second normalization pass, clipped at TAU
    tmp = cv2.pow(np.minimum(np.abs(image), TAU), ALPHA)
    mean = cv2.mean(tmp)[0]
    image = image / (mean ** (1.0 / ALPHA))

    # tanh
    exp_x = cv2.exp(image / TAU)
    exp_negx = cv2.exp(-image / TAU)
    image = cv2.divide(cv2.subtract(exp_x, exp_negx), cv2.add(exp_x, exp_negx))
    image = image * TAU

    image = cv2.normalize(image, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)

    return image
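GAMMA, ALPHA, TAU and difference_of_gaussian are used above but not defined in this snippet; a sketch assuming the values commonly used for Tan-Triggs preprocessing (gamma 0.2, alpha 0.1, tau 10, DoG sigmas 1 and 2):

import cv2

GAMMA, ALPHA, TAU = 0.2, 0.1, 10.0  # typical Tan-Triggs constants (assumption)

def difference_of_gaussian(image, sigma0=1.0, sigma1=2.0):
    # band-pass filter: narrow Gaussian blur minus wide Gaussian blur
    return cv2.GaussianBlur(image, (0, 0), sigma0) - cv2.GaussianBlur(image, (0, 0), sigma1)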
Example #6
 def compute_color_mean(self,hull,img,color_space):
   mask = np.zeros_like(img)
   cv2.drawContours(mask,[hull],-1,(255,255,255),-1)
   if color_space == 'rgb':
     mean = np.asarray(cv2.mean(img,mask[:,:,0])[:3])
     return mean
   elif color_space == 'lab' or color_space == 'hsv':
     mean = np.asarray(cv2.mean(img,mask[:,:,0])[1:3])
     return mean
Example #7
def main(image1, image2, prep_mask=None, thresh=None):
    im1 = cv.imread(image1, cv.IMREAD_COLOR)
    im2 = cv.imread(image2, cv.IMREAD_COLOR)
    im11 = cv.cvtColor(im1, cv.COLOR_BGR2GRAY)
    im22 = cv.cvtColor(im2, cv.COLOR_BGR2GRAY)
    if im11.shape > im22.shape:
        im2 = cv.resize(im2, (im1.shape[1], im1.shape[0]))
        im22 = cv.resize(im22, (im11.shape[1], im11.shape[0]))
    elif im11.shape < im22.shape:
        im1 = cv.resize(im1, (im2.shape[1], im2.shape[0]))
        im11 = cv.resize(im11, (im22.shape[1], im22.shape[0]))
    if prep_mask is None:
        frame_delta = cv.absdiff(im11, im22)
        frame_delta = cv.threshold(frame_delta, 25, 255, cv.THRESH_BINARY)[1]
        frame_delta = cv.erode(frame_delta, None, iterations=2)

        (cnts, _) = cv.findContours(frame_delta, cv.RETR_TREE,
                                    cv.CHAIN_APPROX_SIMPLE)

        height, width = frame_delta.shape[:2]
        for c in cnts:
            blank = np.zeros(frame_delta.shape[:2], dtype='uint8')
            cv.drawContours(blank, [c], 0, 255, -1)
            mean1 = [float(x) for x in cv.mean(im1, mask=blank)]
            mean2 = [float(x) for x in cv.mean(im2, mask=blank)]
            distance = cv.norm(np.array(mean1), np.array(mean2), cv.NORM_L2)
            if distance <= thresh:
                cv.drawContours(frame_delta, [c], 0, 0, -1)
            else:
                cv.drawContours(frame_delta, [c], 0, 255, -1)
    else:
        frame_delta = cv.imread(prep_mask, cv.IMREAD_COLOR)
        frame_delta = cv.cvtColor(frame_delta, cv.COLOR_BGR2GRAY)

    cv.namedWindow('image')
    cv.setMouseCallback('image', draw)
    global img, flood, rec_size
    img = frame_delta
    while(1):
        cv.imshow("image", frame_delta)
        cnt = cv.bitwise_and(im2, im2, mask=frame_delta)
        cv.imshow("pre mareka", cnt)
        k = cv.waitKey(1) & 0xFF
        if k == ord('f'):
            flood = True
        if k == ord('+'):
            rec_size += 1
            print(rec_size)
        if k == ord('-'):
            rec_size -= 1
            print(rec_size)
        if k == 27:
            break
    cv.destroyAllWindows()
    cv.imwrite("ground_truth_{}".format(sys.argv[2]), frame_delta)
Example #8
def rowStitch(imageA, imageB, fx, switch):
    ###################################################
    # finding information between stitched row images #
    ###################################################

    # print " rowStitch function : finding homography"
    res_max = -1
    xA1 = -1
    yA1 = -1
    intervalx = 16
    intervaly = 16
    temp = imageB[:, :int(imageB.shape[1]*0.35)]  # temp
    if switch == 1:
        temp = cv2.Laplacian(temp, cv2.CV_32F)
    if switch == 0:
        sobelx = cv2.Sobel(temp, cv2.CV_32F, 1, 0, ksize=11)
        sobely = cv2.Sobel(temp, cv2.CV_32F, 0, 1, ksize=11)
        temp = sobelx + sobely  # to get the gradient of the image in both directions
    temp = cv2.subtract(temp, cv2.mean(temp))
    score = []
    coor = []
    steps = 16
    intervaly = imageA.shape[0] - 100
    for i in range(imageA.shape[1]-int(0.35*imageB.shape[1]), imageA.shape[1], steps):
        for j in range(0, 100, steps):
            template = imageA[j:j+intervaly, i:i+intervalx]  # template
            if switch == 1:
                template = cv2.Laplacian(template, cv2.CV_32F)
            if switch == 0:
                sobelx = cv2.Sobel(template, cv2.CV_32F, 1, 0, ksize=11)
                sobely = cv2.Sobel(template, cv2.CV_32F, 0, 1, ksize=11)
                template = sobelx + sobely  # to get the gradient of the image
            template = cv2.subtract(template, cv2.mean(template))

            res = cv2.matchTemplate(temp, template, 3)
            # val stores the highest correlation with temp; loc stores the
            # corresponding top-left location inside temp
            _, val, _, loc = cv2.minMaxLoc(res)
            if val > res_max:
                res_max = val
                xA1 = i
                yA1 = j
                xB1 = loc[0]
                yB1 = loc[1]
                # print(val)
    # print res_max,"res_max"
    xB1 = xB1*(1/fx)
    yB1 = yB1*(1/fx)
    xA1 = xA1*(1/fx)
    yA1 = yA1*(1/fx)
    pointsA = [[xA1, yA1], [xA1+intervalx, yA1], [xA1, yA1+intervaly], [xA1+intervalx, yA1+intervaly]]
    pointsB = [[xB1, yB1], [xB1+intervalx, yB1], [xB1, yB1+intervaly], [xB1+intervalx, yB1+intervaly]]
    H, mask = cv2.findHomography(np.asarray(pointsB, float), np.asarray(pointsA, float), cv2.RANSAC, 3)
    return H, res_max
Example #9
    def compare_sharpness(self, path1, path2):
        im1 = self._get_gradient(cv2.imread(path1))
        im2 = self._get_gradient(cv2.imread(path2))

        # cv2.imshow("1", grad_x)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()

        print("img: {} means: {}".format(path1, cv2.mean(im1)))
        print("img: {} means: {}".format(path2, cv2.mean(im2)))
        
Example #10
def rgbPercentage(n):

    B = sum(cv2.mean(n[:, :, 0]))
    G = sum(cv2.mean(n[:, :, 1]))
    R = sum(cv2.mean(n[:, :, 2]))
    total_sum = R+B+G
    B = B / total_sum
    G = G / total_sum
    R = R / total_sum

    return R,G,B
Example #11
def weightedLuminance(grayImg, kp=None):
    """
    Foreground is middle 1/2x1/2 and bottom 1/4 strip
    Background is the rest
    """

    wlMaskFg = np.zeros(grayImg.shape, dtype=grayImg.dtype)
    height, width = grayImg.shape
    cv2.rectangle(wlMaskFg, (width//4, height//4), (3*width//4, 3*height//4), 255, -1)
    cv2.rectangle(wlMaskFg, (0, 3*height//4), (width, height), 255, -1)
    wlMaskBg = cv2.bitwise_not(wlMaskFg)
    meanFg = cv2.mean(grayImg, mask=wlMaskFg)
    meanBg = cv2.mean(grayImg, mask=wlMaskBg)
    return (meanFg[0], meanBg[0])
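A small usage sketch (file name hypothetical); the ratio gives a crude exposure check of the subject against its surround:

import cv2

gray = cv2.imread('frame.png', cv2.IMREAD_GRAYSCALE)  # hypothetical file
fg, bg = weightedLuminance(gray)
print('foreground/background luminance ratio: {:.2f}'.format(fg / max(bg, 1e-6)))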
Example #12
def noniternorm(img):
    b,g,r = cv2.split(img)
    b = np.float32(b)
    g = np.float32(g)
    r = np.float32(r)
    log_b = cv2.log(b) 
    log_g = cv2.log(g) 
    log_r = cv2.log(r) 
    b = cv2.exp(log_b - cv2.mean(log_b)[0])
    g = cv2.exp(log_g - cv2.mean(log_g)[0])
    r = cv2.exp(log_r - cv2.mean(log_r)[0])
    # scale each channel to the full 0-255 range
    b = cv2.normalize(b, None, 0, 255, cv2.NORM_MINMAX)
    g = cv2.normalize(g, None, 0, 255, cv2.NORM_MINMAX)
    r = cv2.normalize(r, None, 0, 255, cv2.NORM_MINMAX)
    return cv2.merge((np.uint8(b),np.uint8(g),np.uint8(r)))
Example #13
    def _get_initial_classes(self):
        images = [cv2.imread(path.join(self._root, f)) for f in self._files]
        self._avg_pixels = np.array([], dtype=np.uint8)

        # extract parts from each image for all of our 6 categories
        for i in range(0, self._n_objects):
            rects = self._rects[:, i]
            
            # compute maximum rectangle
            rows = np.max(rects['f2'] - rects['f0'])
            cols = np.max(rects['f3'] - rects['f1'])

            # extract annotated rectangles
            im_rects = [im[r[0]:r[2], r[1]:r[3], :] for im, r in zip(images, rects)]

            # resize all rectangles to the max size & average all the rectangles
            im_rects = np.array([cv2.resize(im, (cols, rows)) for im in im_rects], dtype=np.float64)
            avgs = np.around(np.average(im_rects, axis = 0))

            # average the resulting rectangle to compute 
            mn = np.around(np.array(cv2.mean(avgs), dtype='float'))[:-1].astype('uint8')

            if(self._avg_pixels.size == 0):
                self._avg_pixels = mn
            else:
                self._avg_pixels = np.vstack((self._avg_pixels, mn))
Example #14
def drawCorners(img):
    min_dilations = 0
    max_dilations = 7
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    for k in range(0, 6):
        for dilations in range(min_dilations, max_dilations):
    
            #cv2.adaptiveThreshold(img, thresh_img, 255,
            #        cv2.CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, block_size, (k/2)*5)
    
            #if dilations > 0:
            #    thresh_img = cv2.dilate(thresh_img, 0, dilations - 1)

            mean = cv2.mean(gray)[0]
            thresh_level = int(mean - 10)
            thresh_level = max(thresh_level, 10)

            retval, thresh_img = cv2.threshold(gray, thresh_level, 255, cv2.THRESH_BINARY)
            cv2.dilate(thresh_img, None, thresh_img, (-1,-1), dilations)

            rows = len(thresh_img)
            cols = len(thresh_img[0])
                    
            cv2.rectangle(thresh_img, (0, 0), (cols-1, rows-1), (255, 255, 255), 3, 8)
            
            cv2.imshow("drawCorners: thresh_img " + str(k*0) + str(dilations), thresh_img)
            cv2.waitKey(1)
Example #15
def black_field(a, b, img):
    #print a, b
    #print b-5, b+5, a-5, b+5
    cropped_img = img[b-5:b+5, a-5:a+5]
    cv2.imshow('kropd', cropped_img)
    #print 255-cv2.mean(cropped_img)[0]
    return 255-cv2.mean(cropped_img)[0]
Example #16
 def __init__(self,image,conts,hier,index):
     # Image is a Kuwahara and posterized image with the same resolution as
     #  the original foil image.
     #self.Image = image #May want to make a foil image class in the future to store the resolution, foilname, size, Pt area, Dirtcount, etc. 
     self.ImageShape = image.shape
     # Index is the index of the region in the contour list
     self.Index = index
     self.Contour = conts[index]
     # Set up Hierarchy relationships:
     self.Node = hier[0,index] #4-length array defining place in hierarchy tree
     self.Next = self.Node[0]
     self.Prev = self.Node[1]
     self.FirstChild = self.Node[2]
     self.Parent = self.Node[3]
     # Create mask
     self.Outline = self.drawOutline(conts,hier)
     self.Mask = self.maskCont(self.ImageShape,conts,hier,index)
     # Region properties
     self.ContourArea=cv2.contourArea(self.Contour)# May want to mult by resolution in future
     self.OutlineArea=np.sum(self.Outline)/255
     self.Area = np.sum(self.Mask)/255
     # Mean gray level
     #cv2 mean function takes an image and a mask as arguments and
     # returns an array of the mean pixel values as floats. We only need the first value.
     self.GrayLevel = int(cv2.mean(image,self.Mask)[0])
Example #17
 def get_features(self, img, mask):
     mean = cv2.mean(img, mask)
     mean = np.array([[mean[:3]]], dtype=np.uint8)
     mean_hsv = cv2.cvtColor(mean, cv2.COLOR_BGR2HSV)
     mean_lab = cv2.cvtColor(mean, cv2.COLOR_BGR2LAB)
     features = np.hstack((mean.flatten(), mean_hsv.flatten(), mean_lab.flatten()))
     return features
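The feature vector packs nine uint8 values: the masked BGR mean plus the same mean re-expressed in HSV and Lab. A hedged standalone sketch of the identical steps (input file hypothetical):

import cv2
import numpy as np

img = cv2.imread('patch.png')                        # hypothetical input
mask = np.full(img.shape[:2], 255, dtype=np.uint8)   # whole-image mask
mean = cv2.mean(img, mask)                           # (B, G, R, 0.0)
pixel = np.array([[mean[:3]]], dtype=np.uint8)       # 1x1 BGR "image"
hsv = cv2.cvtColor(pixel, cv2.COLOR_BGR2HSV).flatten()
lab = cv2.cvtColor(pixel, cv2.COLOR_BGR2LAB).flatten()
features = np.hstack((pixel.flatten(), hsv, lab))    # nine values: BGR + HSV + Lab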
Example #18
    def externalCall(self):


        reslist = cv2.HoughCircles(self.inputImageName.data,
                                   cv2.HOUGH_GRADIENT,
                                   dp=self.dp.value,
                                   minDist=self.minDist.value,
                                   minRadius=self.minRad.value,
                                   maxRadius=self.maxRad.value,
                                   param1=self.threshold1.value,
                                   param2=self.threshold2.value)

        self.circleData.data = reslist

        if self.doCannyOutput.value:
            image = self.inputImageName.data
            canny = cv2.Canny(image, self.threshold1.value, self.threshold1.value//2)
            self.outputCannyImageName.data = canny



        if self.doDrawCircles.value:
            resvisimage = self.inputOrgImageName.data.copy()



            if reslist is not None and len(reslist):
                for x in reslist[0]:
                    corr = 5
                    mask = np.zeros(tuple(self.inputImageName.data.shape[:2]), np.uint8)
                    cv2.circle(mask, (int(x[0]), int(x[1])), int(x[2]-corr), 255, -1)

                    mean_val = cv2.mean(self.inputOrgImageName.data, mask=mask)
                    mv = np.zeros((1, 1, 3), np.uint8)

                    mv[..., 0] = mean_val[0]
                    mv[..., 1] = mean_val[1]
                    mv[..., 2] = mean_val[2]

                    mv2 = cv2.cvtColor(mv, cv2.COLOR_BGR2HSV)

                    #cv2.circle(resvisimage, (x[0], x[1]), int(x[2]-corr), (mean_val[0],mean_val[1],mean_val[2]), -1)
                    self.drawText(resvisimage, str(mv2[0,0]), x[0]-40, x[1] - self.maxRad.value-4, 1)


                    if 28 > mv2[0,0,0] or mv2[0,0,0] > 32 or mv2[0,0,1] < 70 or mv2[0,0,2] < 150:
                        #continue
                        pass




                    center = (int(x[0]), int(x[1]))  # HoughCircles returns float coordinates
                    cv2.circle(resvisimage, center, self.minRad.value, (100, 255, 100), 1)
                    cv2.circle(resvisimage, center, self.maxRad.value, (100, 100, 255), 1)
                    cv2.circle(resvisimage, center, self.minDist.value, (100, 100, 100), 1)
                    cv2.circle(resvisimage, center, int(x[2]), (255, 100, 100), 2)
                    cv2.circle(resvisimage, center, 4, (50, 50, 50), -1)


            self.outputOrgCircleImageName.data = resvisimage
Example #19
	def contour_means(self,  minVal, maxVal, _3D=False, original=None, canny=False, array=None):
		#mean intensity of contours

		''' Note:
		cv2.drawContours(image, contours, contourIdx, color[,...
		#...thickness[, lineType[, hierarchy[, maxLevel[, offset]]]]])->None
		'''

		if self.filtered_contours is None:
			self.filter_cnts(canny=canny, array=array, minVal=minVal, maxVal=maxVal)

		res = list(self.filtered_contours)

		if not(_3D):
			original = np.copy(self.original)
		mean_vals = []
		for i in range(len(res)):
			msk = np.zeros(original.shape,np.uint8) #zero mtx
			cv2.drawContours(msk, res, i, 255, -1)
			mean_ = cv2.mean(original,mask = msk)[0]
			mean_vals.append((i, mean_))
		mean_vals=np.array(mean_vals)

		self.mean_values = mean_vals
		return mean_vals
Example #20
def getColor(image, pathArray):
	mask = np.zeros(image.shape[:2], np.uint8)
	cv2.drawContours(mask, pathArray, -1, 255, -1)
	#cv2.imshow('image',image)
	meanValues = cv2.mean(image,mask = mask)
	maxIndex = meanValues.index(max(meanValues))
	return indexToColor(maxIndex)
Example #21
    def preprocesing(self, image):
        """
        Makes a copy of input image then makes a threasholding operation
        so we can get mask of dominant color areas. Then applies
        that mask to every channel of output image.

        Args:
            image(numpy.ndarray): Image to process

        Returns:
            Image with boosted green channel
        """
        im = image.copy()
        self.main_col = cv.mean(im)[:3]
        c_boost = cv.inRange(image, (0.25*self.main_col[0],
                                     0.25*self.main_col[1],
                                     0.25*self.main_col[2]),
                                    (1.5*self.main_col[0],
                                     2.5*self.main_col[1],
                                     1.5*self.main_col[2]))
        im[:, :, 0] = cv.bitwise_and(image[:, :, 0], c_boost)
        im[:, :, 1] = cv.bitwise_and(image[:, :, 1], c_boost)
        im[:, :, 2] = cv.bitwise_and(image[:, :, 2], c_boost)

        if self.debug:
            cv.imshow("preprocesing", im)
            cv.imwrite("preprocesing.png", im)
        return im
Example #22
def grayWorld(img):
    channels = cv2.split(img)
    meanB = int(cv2.mean(channels[0])[0])
    meanG = int(cv2.mean(channels[1])[0])
    meanR = int(cv2.mean(channels[2])[0])
    img = cv2.merge(channels, img)
    rows, cols, channel = img.shape 
    for i in range(rows):
        for j in range(cols):
            temp = img[i,j]
            mean = np.mean(temp)
            scaleB = mean/meanB
            scaleG = mean/meanG
            scaleR = mean/meanR
            img[i,j] = [scaleB*temp[0], scaleG*temp[1], scaleR*temp[2]]
    return img
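The per-pixel loop above is very slow in Python. An equivalent vectorized sketch of the same gray-world scaling (my rewrite, not the original author's code):

import numpy as np

def gray_world_fast(img):
    img = img.astype(np.float64)
    channel_means = img.reshape(-1, 3).mean(axis=0)  # per-channel means (B, G, R)
    pixel_means = img.mean(axis=2, keepdims=True)    # per-pixel gray level
    scaled = img * (pixel_means / channel_means)     # scale each channel toward gray
    return np.clip(scaled, 0, 255).astype(np.uint8)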
Example #23
 def __init__(self, frame_number, filename, frame):
     self.filename = filename
     self.frame_number = frame_number
     b, g, r, a = cv2.mean(frame)
     self.frame_color_RGB = (r, g, b)
     self.frame_color_HSV = colorsys.rgb_to_hsv(r, g, b)
     cv2.imwrite(filename, frame)
Example #24
    def label(self, image, c):

        # construct a mask for the contour, then compute the
        # average L*a*b* value for the masked region
        mask = np.zeros(image.shape[:2], dtype='uint8')
        cv2.drawContours(mask, [c], -1, 255, -1)
        mask = cv2.erode(mask, None, iterations=2)
        mean = cv2.mean(image, mask=mask)[:3]

        # init the min distance found thus far
        minDist = (np.inf, None)

        # loop over the known L*a*b* color values
        for (i, row) in enumerate(self.lab):
            # compute the distance between the current L*a*b*
            # color value and the mean of the image
            d = dist.euclidean(row[0], mean)

            # if the distance is smaller than the current distance
            # then update the bookkeeping variable
            if d < minDist[0]:
                minDist = (d, i)

        # return the name of the color with the smallest distance
        return self.color_names[minDist[1]]
Example #25
    def label(self, image):
        # construct a mask for the contour, then compute the
        # average L*a*b* value for the masked region
        # mask = np.zeros(image.shape[:2], dtype="uint8")
        # cv2.drawContours(mask, [c], -1, 255, -1)
        # mask = cv2.erode(mask, None, iterations=2)
        mean = cv2.mean(image)[:3]

        # initialize the minimum distance found thus far
        mindist = (np.inf, None)

        # loop over the known L*a*b* color values
        for (i, row) in enumerate(self.lab):
            # compute the distance between the current L*a*b*
            # color value and the mean of the image
            # d = dist.euclidean(row[0], mean)
            d = np.linalg.norm(row[0] - mean)

            # if the distance is smaller than the current distance,
            # then update the bookkeeping variable
            if d < mindist[0]:
                mindist = (d, i)

        # return the name of the color with the smallest distance
        if mindist[0] > 80:
            return "N/A"
        return self.colorNames[mindist[1]]
Example #26
def morph_mean(src, angle):
    dst = ndimage.rotate(src, angle)
    strel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 16))
    dst = cv2.morphologyEx(dst, cv2.MORPH_OPEN, strel)
    mean = cv2.mean(dst)[0]
    print(mean)
    return mean
Example #27
    def label(self, image, c):
        '''
        Figure 1: (Right) The original image. (Left) The mask image for the blue pentagon at the bottom of the image,
        indicating that we will only perform computations in the 'white' region of the image, ignoring the black background.

        Notice how the foreground region of the mask is set to white, while the background is set to black.
        We'll only perform computations within the masked (white) region of the image.

        We also erode the mask slightly to ensure statistics are only being computed for the masked region and that no background
        is accidentally included (due to a non-perfect segmentation of the shape from the original image, for instance).
        '''

        # construct a mask for the contour, then compute the
        # average L*a*b* value for the masked region
        mask = np.zeros(image.shape[:2], dtype='uint8')
        cv2.drawContours(mask, [c], -1, 255, -1)
        mask = cv2.erode(mask, None, iterations=2)
        mean = cv2.mean(image, mask=mask)[:3]

        # init the min distance found thus far
        minDist = (np.inf, None)

        # loop over the known L*a*b* color values
        for (i, row) in enumerate(self.lab):
            # compute the distance between the current L*a*b*
            # color value and the mean of the image
            d = dist.euclidean(row[0], mean)

            # if the distance is smaller than the current distance
            # then update the bookkeeping variable
            if d < minDist[0]:
                minDist = (d, i)

        # return the name of the color with the smallest distance
        return self.colorNames[minDist[1]]
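self.lab and self.colorNames come from the class constructor, which is not shown here; a sketch of the usual construction (the color dictionary is illustrative only):

import cv2
import numpy as np

colors = {'red': (255, 0, 0), 'green': (0, 255, 0), 'blue': (0, 0, 255)}  # illustrative
lab = np.zeros((len(colors), 1, 3), dtype='uint8')
colorNames = []
for i, (name, rgb) in enumerate(colors.items()):
    lab[i] = rgb
    colorNames.append(name)
lab = cv2.cvtColor(lab, cv2.COLOR_RGB2LAB)  # reference colors converted to L*a*b*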
Example #28
import sys
import cv2
import numpy as np
import operator
import copy

RawImage = cv2.imread(sys.argv[1])

# 1. Enhance contrast
Alpha = 3.0
Beta = -Alpha*np.mean(cv2.mean(RawImage)[0:3])
Contrast = cv2.convertScaleAbs(RawImage, alpha=Alpha, beta=Beta)

# 2. Blur
sigmaColor = [40, 200]
sigmaSpace = 10
Blur1 = cv2.bilateralFilter(Contrast, d=0, sigmaColor=sigmaColor[0], sigmaSpace=sigmaSpace, borderType=cv2.BORDER_WRAP)
Blur2 = cv2.bilateralFilter(Contrast, d=0, sigmaColor=sigmaColor[1], sigmaSpace=sigmaSpace, borderType=cv2.BORDER_WRAP)

# 3. Canny
Edges1 = cv2.Canny(Blur1, 100, 300)
Edges2 = cv2.Canny(Blur2, 80, 150)

# 4. Crop
Margin = 5
Edges1 = Edges1[Margin:-Margin, Margin:-Margin]
Edges2 = Edges2[Margin:-Margin, Margin:-Margin]

# 5. Dilation
KernelSize = 3
Kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (KernelSize, KernelSize))
Example #29
# -*- coding:utf-8 -*-
import cv2 as cv
import numpy as np

if __name__ == '__main__':
    # create the array
    array = np.array([1, 2, 3, 4, 5, 10, 6, 7, 8, 9, 10, 0])
    # reshape the array into a 3x4 single-channel image img1
    img1 = array.reshape((3, 4))
    # reshape the array into a 3x2x2 multi-channel image img2
    img2 = array.reshape((3, 2, 2))

    # compute the mean and standard deviation of img1 and img2
    mean_img1 = cv.mean(img1)
    mean_img2 = cv.mean(img2)

    mean_std_dev_img1 = cv.meanStdDev(img1)
    mean_std_dev_img2 = cv.meanStdDev(img2)

    # print the cv.mean() results
    print('cv.mean() results:')
    print('mean of img1: {}'.format(mean_img1))
    print('mean of img2: {}\nmean of channel 1: {}\nmean of channel 2: {}'.format(
        mean_img2, mean_img2[0], mean_img2[1]))
    print('*' * 30)
    # print the cv.meanStdDev() results
    print('cv.meanStdDev() results:')
    print('mean of img1: {}\nstandard deviation: {}'.format(mean_img1[0],
                                                            float(mean_std_dev_img1[1])))
    print('mean of img2: {}\nmean of channel 1: {}\nmean of channel 2: {}\n'
          'standard deviation: {}\nstd of channel 1: {}\nstd of channel 2: {}\n'.format(
Example #30
                MA = MA/2
                eccentricity = np.sqrt(1 - pow(MA, 2)/pow(ma, 2))
                contour_eccentricity.append(eccentricity)
            #eight:aspect_ratio
            aspect_ratio = float(w)/h
            contour_aspectratio.append(aspect_ratio)
            #Nine: extent
            obj_area = cv2.contourArea(cnt)
            rect_area = w*h
            extent = float(obj_area)/rect_area
            contour_extent.append(extent)
            #Ten: Solidity
            solidity = obj_area / area
            contour_solidity.append(solidity)
            #11:RGB mean intensity
            mean_val_BGR = cv2.mean(image,mask = mask)
            contour_RGB.append(mean_val_BGR)
            #12:
            mean_val_HSV = cv2.mean(HSV,mask = mask)
            contour_HSV.append(mean_val_HSV)
            #13:
            mean_val_LAB = cv2.mean(LAB,mask = mask)
            contour_LAB.append(mean_val_LAB)


        if area <= 30000 and area >= 3000  and w < 300 and w > 90  and PPD_score>=0.65 and PPD_score <=0.88 and abs(ma-MA)<=200: #and reck <0.6 and reck > 0.15 and abs(ma-MA)<=200:#area
            all.append(hull)
            text = 'black tube'
            cv2.putText(image,text,(100,100),cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), lineType=cv2.LINE_AA)
    if all != []:
        num += 1
Example #31
    # initialize the progress bar
    widgets = ["Building Dataset: ", progressbar.Percentage(), " ",
               progressbar.Bar(), " ", progressbar.ETA()]
    pbar = progressbar.ProgressBar(maxval=len(paths),
                                   widgets=widgets).start()
    
    #loop over the image paths
    for i, (path, label) in enumerate(zip(paths, labels)):
        #load the image and preprocess it
        image = cv2.imread(path)
        image = aap.preprocess(image)
        image = itap.preprocess(image)
        
        #compute the mean of each channel in each image in the training set and update the lists
        if dType=='train':
            b, g, r = cv2.mean(image)[:3]  # cv2.mean returns channel means in B, G, R order
            B.append(b)
            G.append(g)
            R.append(r)
        #add the image and label to the hdf5 writer
        writer.add([image], [label])
        pbar.update(i)
    
    pbar.finish()
    writer.close()
    
    
#calculate the average RGB values over all images in the dataset, and then serialize
print('Serializing...')
avgs = {'R' : np.mean(R), 'G' : np.mean(G), 'B' : np.mean(B)}
file = config.DATASET_MEAN
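The snippet ends before the write itself; assuming config.DATASET_MEAN names a JSON output path, the usual final step looks like:

import json

with open(file, 'w') as f:  # `file` holds config.DATASET_MEAN, assumed to be a JSON path
    f.write(json.dumps(avgs))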
Example #32
def videoThread():
    #INIT WINDOW
    cv2.namedWindow("Composed", cv2.WINDOW_AUTOSIZE)
    #cv2.namedWindow("img", cv2.WINDOW_AUTOSIZE)
    #cv2.namedWindow("imageHSV_hist", cv2.WINDOW_AUTOSIZE)
    cv2.startWindowThread()

    #CONFIGURE CAPTURE
    capture = cv2.VideoCapture(0)

    #CONFIGURE VIDEO RECORDING
    w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    print('height %d and width %d' % (h, w))
    out = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc('F', 'M', 'P', '4'),
                          10, (2 * w, h), 1)

    #INITIALIZE FLAGS
    boolBoucle = True
    boolVideo = False

    while (boolBoucle):
        #CAPTURE
        capture.read()
        ret, img = capture.read()
        rows, cols, channels = img.shape

        #VIDEO PROCESSING
        """
        # split the color channels
        b,g,r = cv2.split(img)
        # equalize the histogram of each channel
        b=cv2.equalizeHist(b)
        g=cv2.equalizeHist(g)
        r=cv2.equalizeHist(r)
        # merge the image back together
        img = cv2.merge((b,g,r))
        """

        #blur
        img = cv2.GaussianBlur(img, (5, 5), 0)

        #HSV
        imageHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

        #BRIGHTNESS NORMALIZER
        # split the channels
        h, s, v = cv2.split(imageHSV)
        # compute the mean brightness, plus an offset
        meanValv = cv2.mean(v)
        diffv = 125 - meanValv[0]
        v = cv2.convertScaleAbs(v, beta=diffv)
        # merge the image back together
        imageHSV = cv2.merge((h, s, v))

        #convert back to BGR
        imageHSV_hist = cv2.cvtColor(imageHSV, cv2.COLOR_HSV2BGR)

        #thresholding
        min_red = np.array((0., 125., 125.))
        max_red = np.array((7., 255., 255.))

        min_red2 = np.array((170., 125., 125.))
        max_red2 = np.array((180., 255., 255.))

        imgThresh = cv2.inRange(imageHSV, min_red, max_red)
        imgThresh2 = cv2.inRange(imageHSV, min_red2, max_red2)
        imgThreshT = cv2.bitwise_or(imgThresh, imgThresh2)

        #close operation (fill the holes)
        kernel = np.ones((7, 7), np.uint8)
        closing = cv2.morphologyEx(imgThreshT, cv2.MORPH_CLOSE, kernel)
        """
        #Canny
        canny = dst = cv2.Canny( blur, 0, 255 )
        """

        #reference cross
        pt1 = (cols // 2, 0)
        pt2 = (cols // 2, rows)
        cv2.line(img, pt1, pt2, [255, 255, 255], 1)
        pt3 = (0, rows // 2)
        pt4 = (cols, rows // 2)
        cv2.line(img, pt3, pt4, [255, 255, 255], 1)

        #draw square
        sq1 = (cols // 2 + 100, rows // 2 + 100)
        sq2 = (cols // 2 + 100, rows // 2 - 100)
        sq3 = (cols // 2 - 100, rows // 2 - 100)
        sq4 = (cols // 2 - 100, rows // 2 + 100)

        cv2.line(img, sq1, sq2, [255, 255, 255], 1)
        cv2.line(img, sq2, sq3, [255, 255, 255], 1)
        cv2.line(img, sq3, sq4, [255, 255, 255], 1)
        cv2.line(img, sq4, sq1, [255, 255, 255], 1)

        #contour
        contours, hier = cv2.findContours(closing, cv2.RETR_LIST,
                                          cv2.CHAIN_APPROX_SIMPLE)

        #contour filtering
        #init the list of filtered elements
        cnt_filt = []
        for cnt in contours:
            CurrAera = cv2.contourArea(cnt)
            if CurrAera > 1500:  # remove small areas like noise etc
                #hull = cv2.convexHull(cnt)    # find the convex hull of contour
                hull = cv2.approxPolyDP(cnt, 0.02 * cv2.arcLength(cnt, True),
                                        True)
                approx = cv2.convexHull(cnt)
                #if len(hull)==12:
                if not cv2.isContourConvex(hull) and len(hull) <= 12:
                    m = cv2.moments(hull)
                    n = cv2.moments(approx)
                    if m['m00'] != 0:
                        barycentre = (int(m['m10'] / m['m00']),
                                      int(m['m01'] / m['m00']))
                        cv2.drawContours(img, [hull], 0, (0, 0, 255), 2)
                        barycentre_2 = (int(n['m10'] / n['m00']),
                                        int(n['m01'] / n['m00']))

                        cv2.circle(img, barycentre_2, 4, (255, 0, 255), -1)
                        cv2.drawContours(img, [approx], 0, (0, 255, 255), 2)
                        cv2.line(img, barycentre, barycentre_2,
                                 [255, 255, 255], 1)
                        #register info
                        cnt_filt.append(
                            (CurrAera, barycentre, barycentre_2, hull))

        #sort the filtered contours by decreasing area
        cnt_filt.sort(reverse=True)

        #print cnt_filt
        if len(cnt_filt) > 0:
            cv2.circle(img, cnt_filt[0][2], 4, (255, 0, 0), -1)

            (a, b) = cnt_filt[0][2]
            ptx = (a, rows // 2)
            pty = (cols // 2, b)
            cv2.line(img, cnt_filt[0][2], ptx, [0, 0, 255], 1)
            cv2.line(img, cnt_filt[0][2], pty, [0, 0, 255], 1)

            yaw = angle(cnt_filt[0][1], cnt_filt[0][2])

            distances = interdistance(cnt_filt[0][1], cols, rows)
            #print distances, yaw
            cv2.drawContours(img, [cnt_filt[0][3]], 0, (0, 255, 0), 2)

            if QueueVideo.full():
                QueueVideo.get()
            #Put item in queue
            sendItem = ('DETECT', distances, yaw)
            QueueVideo.put(sendItem)

        else:
            if QueueVideo.full():
                QueueVideo.get()
            #Put item in queue
            sendItem = ('KO')
            QueueVideo.put(sendItem)

        #TEST ACTION
        act = toucheAction(20)
        if act:
            print('Action:', act)
            if act == 'save':
                cv2.imwrite('capture.png', img)
            elif act == 'esc':
                boolBoucle = False
            elif act == 'video':
                boolVideo = not boolVideo
                if boolVideo:
                    print('video: REC')
                else:
                    print('video: PAUSE')

        #CREATE COMPOSED IMAGE
        rows, cols, channels = img.shape
        compoImage = np.zeros((rows, 2 * cols, 3), np.uint8)
        compoImage[0:rows, 0:cols] = img
        imgThreshTRGB = cv2.cvtColor(imgThreshT, cv2.COLOR_GRAY2BGR)
        compoImage[0:rows, cols:2 * cols] = imgThreshTRGB
        #CAPTURE VIDEO
        if boolVideo:
            out.write(compoImage)

        #DISPLAY
        #cv2.imshow("imageHSV_hist", imageHSV_hist)
        #cv2.imshow("imgThresh", imgThresh)
        #cv2.imshow("imgThresh2", imgThresh2)
        #cv2.imshow("imgThreshT", imgThreshT)
        #cv2.imshow("cont", cont)
        #cv2.imshow("canny", canny)
        #cv2.imshow("img", img)
        #cv2.imshow("closing", closing)
        cv2.imshow("Composed", compoImage)

    capture.release()
    cv2.destroyAllWindows()
Example #33
import numpy as np
import cv2

cap = cv2.VideoCapture(1 + cv2.CAP_V4L)

cap.set(cv2.CAP_PROP_CONVERT_RGB, 0) # turn off RGB conversion

while(True):
    # Capture frame-by-frame
    _, frame = cap.read()
    bf81 = np.array(frame // 16, dtype=np.uint8)
    # Create the mask
    #binary = cv2.imread('Masked_Image.png', 0)
    _, binary = cv2.threshold(bf81, 50, 255, cv2.THRESH_BINARY)
    im3 = cv2.bitwise_and(bf81, binary)
    im3[binary == 0] = 0
    Mean = cv2.mean(bf81, binary)
    print('Mean =', Mean[0], end='\r')
    cv2.imshow('Mask', binary)
    cv2.imshow('Original', bf81)
    cv2.imshow('Masked Image', im3)
    # detect waitkey of q to quit
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break
Example #34
                                  (int(xmax * expected), int(ymax * expected)),
                                  (255, 255, 255), 2)
                    cv2.putText(
                        crop_img, className,
                        (int(xmin * expected), int(ymin * expected) - 5),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255))
                    depth = np.asanyarray(aligned_depth_frame.get_data())
                    # Crop depth data (note: NumPy slicing is row-major, i.e. [y, x]):
                    depth = depth[xmin_depth:xmax_depth,
                                  ymin_depth:ymax_depth].astype(float)

                    # Get data scale from the device and convert to meters
                    depth_scale = profile.get_device().first_depth_sensor(
                    ).get_depth_scale()
                    depth = depth * depth_scale
                    dist, _, _, _ = cv2.mean(depth)

                    cv2.rectangle(colorized_depth, (xmin_depth, ymin_depth),
                                  (xmax_depth, ymax_depth), (255, 255, 255), 2)

                    cv2.putText(colorized_depth, className,
                                (xmin_depth, ymin_depth - 5),
                                cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255))
                    cv2.putText(colorized_depth, str(round(dist, 2)),
                                (xmin_depth, ymin_depth + 20),
                                cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 255, 255))

        if windowSelector == 1:
            #cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
            cv2.namedWindow(
                'RealSense', cv2.WINDOW_NORMAL
Example #35
args = vars(ap.parse_args())

print("[INFO] describing images...")
imagePaths = list(paths.list_images(args["dataset"]))

#rawImages = []
features = [["dat1", "dat2", "dat3", "dat4", "dat5", "dat6", "clase"]]

for (i, imagePath) in enumerate(imagePaths):
    #print(imagePath)

    image = cv2.imread(imagePath)
    #image1 = cv2.resize(image, (32, 32)).flatten() #entrada1
    image = cv2.resize(image, (32, 32))

    means = cv2.mean(image)
    #print(means)
    means = means[:3]
    #print(means)
    (means, stds) = cv2.meanStdDev(image)
    #print(means, stds)
    stats = np.concatenate([means, stds]).flatten()
    print(stats)

    #	hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    #	hist = cv2.calcHist([hsv], [0, 1, 2], None, [8, 8, 8], [0, 180, 0, 256, 0, 256])
    #	feature = cv2.normalize(hist, hist).flatten()  #entrada2

    label = imagePath.split(os.path.sep)[-1].split(".")[0]

    #image1=np.array(image1)
Example #36
        #print d

        if area >= biggest_area and d < distance_from_center:
            biggest_area = area
            biggest_cnt[0] = cnt
            cxmax = cx
            cymax = cy
            #print biggest_area
if biggest_area != 0:

    #cv2.drawContours(img_true, [biggest_cnt[0]], 0,(0,0,255),1)

    for h, cnt in enumerate(biggest_cnt[0]):
        mask = np.zeros(imgray.shape, np.uint8)
        cv2.drawContours(mask, [biggest_cnt[0]], 0, 255, -1)
        mean = cv2.mean(img_true, mask=mask)

#print mean
mean = colorsys.rgb_to_hsv(mean[2] / 255, mean[1] / 255, mean[0] / 255)
hsv = list(mean)
hsv[0] = hsv[0] * 360
print(hsv)

if hsv[2] < 0.1:
    color = 'black'
elif (hsv[0] < 11 or hsv[0] > 351) and hsv[1] > .7 and hsv[2] > .1:
    color = 'red'
elif (hsv[0] > 64 and hsv[0] < 150) and hsv[1] > .15 and hsv[2] > .1:
    color = 'green'
elif (hsv[0] > 180 and hsv[0] < 255) and hsv[1] > .15 and hsv[2] > .1:
    color = 'blue'
Example #37
def laplacian(img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    img_sobel = cv2.Laplacian(img_gray, cv2.CV_16U)
    return cv2.mean(img_sobel)[0]
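A hedged usage sketch ranking two images by this score (file names hypothetical); a higher mean Laplacian response means more high-frequency detail:

import cv2

img_a = cv2.imread('a.png')
img_b = cv2.imread('b.png')
sharper = 'a.png' if laplacian(img_a) > laplacian(img_b) else 'b.png'
print('sharper image:', sharper)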
Example #38
 def getBlurredImages(self):
     return [
         np.full(img.shape,
                 cv2.mean(img)[:3]) / 255 for img in self.images
     ]
Example #39
    for i, c in enumerate(contours):  # process each object

        # compute the object's coordinates
        M = cv2.moments(c)  # compute the moments
        g.append((int(M['m10']/M['m00']), int(M['m01']/M['m00'])))  # append the centroid (x, y) to list g

        # extract the bounding rectangle of the contour
        x, y, w, h = cv2.boundingRect(c)
        R_img = blueImg[y:y+h, x:x+w]
        R_bin = np.zeros((h, w), np.uint8)  # object mask; R_bin = img_binarized[y:y+h, x:x+w] would fail if the rectangle contains several objects
        cv2.drawContours(R_bin, [c[:,0]-(x,y)], 0, 255, -1)

        # decide whether the object is a persimmon-seed cracker or a peanut
        # ToDo: exclude objects cut off at the ROI edge (screen by the range of g[i][1])
        # ToDo: should we also screen by area?
        mean_val = cv2.mean(R_img, mask = R_bin)
        if mean_val[0] < JUDGE_THRESHOLD:
            judge.append('K')  # persimmon-seed cracker
        else:
            judge.append('P')  # peanut

        # match objects between the previous and current frames, so the same object is not sent a sorting command twice
        flag = 0
        for k, jp in enumerate(judge_prev):
            # condition for treating two detections as the same object; assume items flow right to left across the screen (negative y direction)
            if judge[i] == jp and abs(g[i][1] - g_prev[k][1] + VELOCITY) < DISPLACEMENT_THRESHOLD and abs(g[i][0] - g_prev[k][0]) < DISPLACEMENT_THRESHOLD:
                # ToDo: if sorting timing is critical, estimate the arrival time at the sorter from the velocity (-g[i][1]+g_prev[j][1])/dt
                flag = 1
                break
            else:
                pass
Example #40
    def handle_image(self, image_msg):
        # converting the ROS image message to CV2-image
        image = self.bridge.imgmsg_to_cv2(image_msg, 'bgr8')

        if self._first_callback:
            mean = cv2.mean(image)

            if sum(mean) < self._blind_threshold:
                self._speak("Hey!   Remove my camera cap!",
                            self.speak_publisher)

        # setup detectors
        self.field_boundary_detector.set_image(image)
        self.obstacle_detector.set_image(image)
        self.line_detector.set_image(image)

        self.runtime_evaluator.set_image()

        self.ball_detector.set_image(image)

        if self.config['vision_parallelize']:
            self.field_boundary_detector.compute_all(
            )  # computes stuff which is needed later in the processing
            fcnn_thread = threading.Thread(
                target=self.ball_detector.compute_top_candidate)
            conventional_thread = threading.Thread(
                target=self._conventional_precalculation)

            conventional_thread.start()
            fcnn_thread.start()

            conventional_thread.join()
            fcnn_thread.join()
        else:
            self.ball_detector.compute_top_candidate()
            self._conventional_precalculation()

        # TODO: handle all ball candidates

        #"""
        ball_candidates = self.ball_detector.get_candidates()

        if ball_candidates:
            balls_under_field_boundary = self.field_boundary_detector.balls_under_convex_field_boundary(
                ball_candidates)
            if balls_under_field_boundary:
                sorted_rated_candidates = sorted(balls_under_field_boundary,
                                                 key=lambda x: x.rating)
                top_ball_candidate = list([
                    max(sorted_rated_candidates[0:1], key=lambda x: x.rating)
                ])[0]
            else:
                top_ball_candidate = None
        else:
            top_ball_candidate = None
        """
        # check whether ball candidates are under the field_boundary
        # TODO: handle multiple ball candidates
        top_ball_candidate = self.ball_detector.get_top_candidate()
        if top_ball_candidate:
            ball = []
            ball.append(top_ball_candidate)
            ball_under_field_boundary = self.field_boundary_detector.balls_under_field_boundary(ball)
            if ball_under_field_boundary:
                top_ball_candidate = ball_under_field_boundary[0]
            else:
                top_ball_candidate = None
        #"""

        # check whether ball candidates are over rating threshold
        if top_ball_candidate and top_ball_candidate.rating > self._ball_candidate_threshold:
            # create ball msg
            # TODO: publish empty msg if no top candidate as described in msg description
            balls_msg = BallsInImage()
            balls_msg.header.frame_id = image_msg.header.frame_id
            balls_msg.header.stamp = image_msg.header.stamp

            ball_msg = BallInImage()
            ball_msg.center.x = top_ball_candidate.get_center_x()
            ball_msg.center.y = top_ball_candidate.get_center_y()
            ball_msg.diameter = top_ball_candidate.get_diameter()
            ball_msg.confidence = 1

            balls_msg.candidates.append(ball_msg)
            self.debug_printer.info('found a ball! \\o/', 'ball')
            self.pub_balls.publish(balls_msg)

        # create goalpost msg
        goal_parts_msg = GoalPartsInImage()
        goal_parts_msg.header.frame_id = image_msg.header.frame_id
        goal_parts_msg.header.stamp = image_msg.header.stamp

        # create obstacle msg
        obstacles_msg = ObstaclesInImage()
        obstacles_msg.header.frame_id = image_msg.header.frame_id
        obstacles_msg.header.stamp = image_msg.header.stamp
        for red_obs in self.obstacle_detector.get_red_obstacles():
            obstacle_msg = ObstacleInImage()
            obstacle_msg.color = ObstacleInImage.ROBOT_MAGENTA
            obstacle_msg.top_left.x = red_obs.get_upper_left_x()
            obstacle_msg.top_left.y = red_obs.get_upper_left_y()
            obstacle_msg.height = int(red_obs.get_height())
            obstacle_msg.width = int(red_obs.get_width())
            obstacle_msg.confidence = 1.0
            obstacle_msg.playerNumber = 42
            obstacles_msg.obstacles.append(obstacle_msg)
        for blue_obs in self.obstacle_detector.get_blue_obstacles():
            obstacle_msg = ObstacleInImage()
            obstacle_msg.color = ObstacleInImage.ROBOT_CYAN
            obstacle_msg.top_left.x = blue_obs.get_upper_left_x()
            obstacle_msg.top_left.y = blue_obs.get_upper_left_y()
            obstacle_msg.height = int(blue_obs.get_height())
            obstacle_msg.width = int(blue_obs.get_width())
            obstacle_msg.confidence = 1.0
            obstacle_msg.playerNumber = 42
            obstacles_msg.obstacles.append(obstacle_msg)

        if self.config['vision_ball_classifier'] == 'yolo':
            candidates = self.goalpost_detector.get_candidates()
            for goalpost in candidates:
                post_msg = PostInImage()
                post_msg.width = goalpost.get_width()
                post_msg.confidence = goalpost.get_rating()
                post_msg.foot_point.x = goalpost.get_center_x()
                post_msg.foot_point.y = goalpost.get_lower_right_y()
                post_msg.top_point = post_msg.foot_point
                goal_parts_msg.posts.append(post_msg)
        else:
            for white_obs in self.obstacle_detector.get_white_obstacles():
                post_msg = PostInImage()
                post_msg.width = white_obs.get_width()
                post_msg.confidence = 1.0
                post_msg.foot_point.x = white_obs.get_center_x()
                post_msg.foot_point.y = white_obs.get_lower_right_y()
                post_msg.top_point = post_msg.foot_point
                goal_parts_msg.posts.append(post_msg)
        for other_obs in self.obstacle_detector.get_other_obstacles():
            obstacle_msg = ObstacleInImage()
            obstacle_msg.color = ObstacleInImage.UNDEFINED
            obstacle_msg.top_left.x = other_obs.get_upper_left_x()
            obstacle_msg.top_left.y = other_obs.get_upper_left_y()
            obstacle_msg.height = int(other_obs.get_height())
            obstacle_msg.width = int(other_obs.get_width())
            obstacle_msg.confidence = 1.0
            obstacles_msg.obstacles.append(obstacle_msg)
        self.pub_obstacle.publish(obstacles_msg)

        goal_msg = GoalInImage()
        goal_msg.header = goal_parts_msg.header
        left_post = PostInImage()
        left_post.foot_point.x = 9999999999
        left_post.confidence = 1.0
        right_post = PostInImage()
        right_post.foot_point.x = -9999999999
        right_post.confidence = 1.0
        for post in goal_parts_msg.posts:
            if post.foot_point.x < left_post.foot_point.x:
                left_post = post
                left_post.confidence = post.confidence
            if post.foot_point.x > right_post.foot_point.x:
                right_post = post
                right_post.confidence = post.confidence
        goal_msg.left_post = left_post
        goal_msg.right_post = right_post
        goal_msg.confidence = 1.0
        if goal_parts_msg.posts:
            self.pub_goal.publish(goal_msg)

        # create line msg
        line_msg = LineInformationInImage()  # Todo: add lines
        line_msg.header.frame_id = image_msg.header.frame_id
        line_msg.header.stamp = image_msg.header.stamp
        for lp in self.line_detector.get_linepoints():
            ls = LineSegmentInImage()
            ls.start.x = lp[0]
            ls.start.y = lp[1]
            ls.end = ls.start
            line_msg.segments.append(ls)
        self.pub_lines.publish(line_msg)

        # create non_line msg
        # non_line_msg = LineInformationInImage()
        # non_line_msg.header.frame_id = image_msg.header.frame_id
        # non_line_msg.header.stamp = image_msg.header.stamp
        # i = 0
        # for nlp in self.line_detector.get_nonlinepoints():
        #     nls = LineSegmentInImage()
        #     nls.start.x = nlp[0]
        #     nls.start.y = nlp[1]
        #     nls.end = nls.start
        #     if i % 2 == 0:
        #         non_line_msg.segments.append(nls)
        #     i += 1
        # self.pub_non_lines.publish(non_line_msg)

        if self.ball_fcnn_publish_output and self.config[
                'vision_ball_classifier'] == 'fcnn':
            self.pub_ball_fcnn.publish(self.ball_detector.get_cropped_msg())

        if self.publish_fcnn_debug_image and self.config[
                'vision_ball_classifier'] == 'fcnn':
            self.pub_debug_fcnn_image.publish(
                self.ball_detector.get_debug_image())

        # do debug stuff
        if self.publish_debug_image:
            self.debug_image_dings.set_image(image)
            self.debug_image_dings.draw_obstacle_candidates(
                self.obstacle_detector.get_candidates(), (0, 0, 0),
                thickness=3)
            self.debug_image_dings.draw_obstacle_candidates(
                self.obstacle_detector.get_red_obstacles(), (0, 0, 255),
                thickness=3)
            self.debug_image_dings.draw_obstacle_candidates(
                self.obstacle_detector.get_blue_obstacles(), (255, 0, 0),
                thickness=3)
            if self.config['vision_ball_classifier'] == "yolo":
                post_candidates = self.goalpost_detector.get_candidates()
            else:
                post_candidates = self.obstacle_detector.get_white_obstacles()
            self.debug_image_dings.draw_obstacle_candidates(post_candidates,
                                                            (255, 255, 255),
                                                            thickness=3)
            self.debug_image_dings.draw_field_boundary(
                self.field_boundary_detector.get_field_boundary_points(),
                (0, 0, 255))
            self.debug_image_dings.draw_field_boundary(
                self.field_boundary_detector.get_convex_field_boundary_points(
                ), (0, 255, 255))
            self.debug_image_dings.draw_ball_candidates(
                self.ball_detector.get_candidates(), (0, 0, 255))
            self.debug_image_dings.draw_ball_candidates(
                self.field_boundary_detector.balls_under_field_boundary(
                    self.ball_detector.get_candidates(),
                    self._ball_candidate_y_offset), (0, 255, 255))
            # draw top candidate in
            self.debug_image_dings.draw_ball_candidates([top_ball_candidate],
                                                        (0, 255, 0))
            # draw linepoints in red
            self.debug_image_dings.draw_points(
                self.line_detector.get_linepoints(), (0, 0, 255))
            # draw nonlinepoints in black
            # self.debug_image_dings.draw_points(
            #     self.line_detector.get_nonlinepoints(),
            #     (0, 0, 0))

            # publish debug image
            self.pub_debug_image.publish(
                self.bridge.cv2_to_imgmsg(self.debug_image_dings.get_image(),
                                          'bgr8'))

        self._first_callback = False
Example #41
    fps = int(1 / (time.time() - t))
    cv2.putText(img, "FPS: " + str(fps), (50, 50), font, 1, (255, 255, 255), 2,
                cv2.LINE_AA)
    cv2.imshow('Frame', img)
    frame_count += 1

    #getting input
    k = 0xFF & cv2.waitKey(10)
    if k == 27:
        break

    if frame_count == 80:
        color = (0, 255, 0)

    if frame_count == 100:
        thresh = cv2.mean(gray[165:315, 270:370])
        thresh = thresh[0] - 15
        break

#initilizing values
pressed = False
mouse_enable = False
event_que = []
gesFound = 0
msg = ''

#the main event loop
while (cap.isOpened()):

    t = time.time()
    l = []
Example #42
# 5. Orientation
(x, y), (MA, ma), angle = cv2.fitEllipse(cnt)
print((x, y), (MA, ma), angle)

# 6. Mask and pixel points
mask = np.zeros(imgray.shape, np.uint8)
cv2.drawContours(mask, [cnt], 0, 255, -1)
pixelpoints = np.transpose(np.nonzero(mask))  # NumPy gives coordinates in (row, column) format
#pixelpoints = cv2.findNonZero(mask)  # OpenCV gives coordinates in (x, y) format; row=y and column=x

# 7. Maximum value, minimum value and their locations
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(imgray, mask=mask)
print(min_val, max_val, min_loc, max_loc)

# 8. Mean color or mean intensity
mean_val = cv2.mean(img, mask=mask)
print(mean_val)

# 9. Extreme points
leftmost = tuple(cnt[cnt[:, :, 0].argmin()][0])
rightmost = tuple(cnt[cnt[:, :, 0].argmax()][0])
topmost = tuple(cnt[cnt[:, :, 1].argmin()][0])
bottommost = tuple(cnt[cnt[:, :, 1].argmax()][0])
# draw four Extreme Points
img8 = img.copy()
img_draw8 = cv2.circle(img8,
                       leftmost,
                       radius=5,
                       color=(0, 0, 255),
                       thickness=-1)
img_draw8 = cv2.circle(img8,
Example #43
    mask = cv2.dilate(mask, None, iterations=4)
    mask = cv2.erode(mask, None, iterations=2)

    #cv2.imshow('mask', mask)
    #cv2.waitKey(0)
    #find contours in mask, initialize current center
    contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
    cnts = contours[-2]
    center = None
    b, g, r = cv2.split(frame)
    b = cv2.bitwise_and(b, mask)
    g = cv2.bitwise_and(g, mask)
    r = cv2.bitwise_and(r, mask)
    frame = cv2.merge((b, g, r))
    averagemask = cv2.mean(frame, mask=mask)

    #cv2.imshow('frame', new_frame)

    if len(cnts) > 0:
        #find the largest contour, use it to compute the min enclosing circle
        #and centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        #proceed only if the radius meets a minimum size (threshold still needs tuning)
        if radius > 1:
            #draw the circle and centroid on the frame,
            #then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
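
The contours[-2] indexing above is a portability trick: cv2.findContours returns (contours, hierarchy) in OpenCV 2.x and 4.x but (image, contours, hierarchy) in 3.x, and the contour list is second-to-last in both layouts. A more explicit version-agnostic sketch of the same unpacking:

res = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = res[0] if len(res) == 2 else res[1]  # 2.x/4.x vs 3.x return shapes
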
Example #44
0
# position resolution plot directory
out_dirname2 = '../../results/matching_research/position_resolution/'
if not exists(out_dirname2):
    makedirs(out_dirname2)

# contractional and rotational resolution plot directory
out_dirname3 = '../../results/matching_research/contractional_rotational_resolution/'
if not exists(out_dirname3):
    makedirs(out_dirname3)

#-------------------------------------------------------------------------------
# position resolution preparation
#-------------------------------------------------------------------------------
# "T" means template and "I" means image, used throughout the script
# sum(T^2) is computed once but reused for every image
T_mean = cv2.mean(templ, alpha)
T = cv2.subtract(templ, T_mean, mask=alpha, dtype=cv2.CV_32S)
T2 = cv2.pow(T, 2)
T2_sum = cv2.sumElems(T2)
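
A short aside on why sum(T^2) can be cached: with the template and each image patch mean-centered, the matching score is the usual zero-mean normalized cross-correlation, NCC = sum(T*I) / sqrt(sum(T^2) * sum(I^2)), so the template term of the denominator is identical for every image and every offset, while the sum(I^2) term has to be recomputed per window.
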

#-------------------------------------------------------------------------------
# contractional resolution preparation
#-------------------------------------------------------------------------------
# scaled templates and masks
log_scale = numpy.linspace(-1.0, 1.0, 11)
x_scale = [pow(2, s) for s in log_scale]

m = cv2.moments(alpha, True)
templ_center = (int(m['m10'] / m['m00']), int(m['m01'] / m['m00']))

templ_scale_v = [cv2.resize(templ, (0, 0), fx=s, fy=s) for s in x_scale]
Example #45
0
    def processImgByThresh(self):
        self.recover()
        img = self.data.img_show.copy()
        img_binary = self.data.img_binary.copy()
        '''img_r = self.data.img_r.copy()
		img_g = self.data.img_g.copy()
		img_b = self.data.img_b.copy()'''
        img_b, img_g, img_r = cv2.split(img)
        avg = cv2.mean(img)
        thresh_min = min(avg[:2])

        get_max = lambda x: self.thresh_max if x > self.thresh_min else x
        get_min = lambda x: self.thresh_max if x > self.thresh_min else x
        '''
		print('mean pixel value:', avg, thresh_min)
		if 0 == self.thresh_type or 2 == self.thresh_type:
			# use the minimum value
			if 0 == self.chose_type_min:
				# take the upper value
				pass
		
		print(self.thresh_type)
		for row in range (5):
			for col in range (5):
				print(img[row][col],img_r[row][col],img_g[row][col],img_b[row][col])

		thresh, img_r = cv2.threshold(img_r, self.thresh_min, 255, cv2.THRESH_BINARY)# | cv2.THRESH_TRUNC)
		thresh, img_g = cv2.threshold(img_g, self.thresh_min, 255, cv2.THRESH_BINARY)# | cv2.THRESH_TRUNC)
		thresh, img_b = cv2.threshold(img_b, self.thresh_min, 255, cv2.THRESH_BINARY)# | cv2.THRESH_TRUNC)
		#self.data.img_show = cv2.merge([img_r, img_g, img_b])
		#self.shower.showProcessedImg(True)
		print(self.data.height, self.data.width)
		print('type:', type(img_binary))'''
        '''img_b, img_g, img_r = cv2.split(img)
		img_gray = cv2.cvtColor(self.data.img_show, cv2.COLOR_BGR2GRAY)
		#img_binary = cv2.equalizeHist(img_gray)
		img_b = cv2.equalizeHist(img_b)
		img_g = cv2.equalizeHist(img_g)
		img_r = cv2.equalizeHist(img_r)
		img_binary = cv2.merge((img_b, img_g, img_r))
		self.data.img_show = img_binary
		self.shower.showProcessedImg()
		return
		'''
        if self.is_splite:
            for row in range(self.data.height):
                for col in range(self.data.width):
                    value = img[row][col]
                    min_value = min(value)
                    max_value = max(value)
                    if 0 == self.thresh_type or 2 == self.thresh_type:
                        # use the minimum value
                        if min_value > self.thresh_min or (
                                self.is_use_rgb_differ
                                and min_value + 50 < max_value):
                            img_binary[row][col] = self.thresh_max
                        '''
						if 0 == self.chose_type_min:
							for i in range(3):
								if value[i] > self.thresh_min:
									img_binary[row][col] = self.thresh_max
									pass
						else:
							for i in range(3):
								if img[row][col][i] > self.thresh_min:
									img[row][col][i] = self.thresh_min
						'''

                    elif 1 == self.thresh_type or 2 == self.thresh_type:
                        if 0 == self.chose_type_min or 2 == self.chose_type:
                            pass
        else:
            print('no channel splitting')
            img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            thresh, img_binary = cv2.threshold(img_gray, self.thresh_min, 255,
                                               cv2.THRESH_BINARY)
            img_differ = self.getDiffer(self.data.img_rgb)
            size = len(img_differ)
            img_differ = np.array(img_differ)
            print('size:', img_differ.shape)
            print(img_differ)
            if self.is_use_rgb_differ:
                for i in range(self.data.height):
                    for j in range(self.data.width):
                        #if img_differ[i][j] > 50:
                        if 0 == img_binary[i][
                                j] and img_differ[i][j] > 50:  # max RGB channel difference
                            img_binary[i][j] = 255

        self.data.img_binary = img_binary
        self.data.img_show = img_binary.copy()
        self.shower.showProcessedImg(True)
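
The nested per-pixel loops above are easy to read but very slow in Python; the same channel-minimum test vectorizes cleanly in NumPy. A sketch of the equivalent operation (the function name is illustrative, and the 50-level spread is the constant used above):

import numpy as np

def threshold_by_channel_min(img, thresh_min, thresh_max, use_rgb_differ=True):
    # img: HxWx3 BGR uint8 array; returns a single-channel binary image
    min_c = img.min(axis=2).astype(np.int16)
    max_c = img.max(axis=2).astype(np.int16)
    cond = min_c > thresh_min
    if use_rgb_differ:
        cond |= (max_c - min_c) > 50  # large spread between channels
    binary = np.zeros(img.shape[:2], np.uint8)
    binary[cond] = thresh_max
    return binary
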
                #get the top-1 (most common) color
                _, _, _, max_loc_h = cv2.minMaxLoc(hist_h)
                _, _, _, max_loc_s = cv2.minMaxLoc(hist_s)
                _, _, _, max_loc_v = cv2.minMaxLoc(hist_v)

                #print(max_loc_h[1],max_loc_s[1],max_loc_v[1])

                #artificially increase the saturation of the colors (s+40)
                scalar_color = (max_loc_h[1] * 12, max_loc_s[1] * 32 + 40,
                                max_loc_v[1] * 32)

                crop_img_hsv[:] = (scalar_color)
                crop_img_hsv = cv2.cvtColor(crop_img_hsv, cv2.COLOR_HSV2BGR)

                #in BGR color space:
                meanCurr = cv2.mean(crop_img_hsv)

                if (ap.meanColor == (999, 999, 999)):
                    ap.meanColor = meanCurr
                else:
                    ap.meanColor = ((ap.meanColor[0] + meanCurr[0]) / 2,
                                    (ap.meanColor[1] + meanCurr[1]) / 2,
                                    (ap.meanColor[2] + meanCurr[2]) / 2)
                    scalar_color = ap.meanColor
                    #print(ap.name, meanCurr, ap.meanColor)

                #cv2.imshow("color", crop_img_hsv)

                #ground truth colors
                SBlack = (0.0, 0.0, 0.0)
                SWhite = (255.0, 255.0, 255.0)
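
The "top-1 color" lookup above works because cv2.calcHist returns an Nx1 column, so cv2.minMaxLoc reports the winning bin as max_loc = (0, bin_index). A compact sketch of that pattern (the 15/8 bin counts are inferred from the *12 and *32 scale factors above, so treat them as an assumption):

import cv2

def dominant_hsv(crop_hsv, h_bins=15, sv_bins=8):
    # per-channel histogram peak ~= the most common (modal) value
    hist_h = cv2.calcHist([crop_hsv], [0], None, [h_bins], [0, 180])
    hist_s = cv2.calcHist([crop_hsv], [1], None, [sv_bins], [0, 256])
    hist_v = cv2.calcHist([crop_hsv], [2], None, [sv_bins], [0, 256])
    h_bin = cv2.minMaxLoc(hist_h)[3][1]  # max_loc is (0, bin_index)
    s_bin = cv2.minMaxLoc(hist_s)[3][1]
    v_bin = cv2.minMaxLoc(hist_v)[3][1]
    return (h_bin * (180 // h_bins),
            s_bin * (256 // sv_bins),
            v_bin * (256 // sv_bins))
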
Example #47
0
def getMean(img, mask):
    mean = cv2.mean(img, mask)
    return mean
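
One thing a wrapper like this hides: cv2.mean always returns a 4-tuple of per-channel means, zero-padded for missing channels, so callers typically slice off what they need. A tiny usage check:

import cv2
import numpy as np

img = np.full((4, 4, 3), (10, 20, 30), np.uint8)  # uniform BGR test image
mask = np.full((4, 4), 255, np.uint8)             # select every pixel
print(getMean(img, mask))      # (10.0, 20.0, 30.0, 0.0)
print(getMean(img, mask)[:3])  # just the (b, g, r) means
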
Example #48
0
    try:
        ret, frame = cap.read()
        if ret:
            frame = cv2.resize(frame, (0,0), fx=1/2,fy=1/2, interpolation=cv2.INTER_AREA)
     
            start_time = time()
            boxes = detector.detect(frame)
            execution_time['detection'] = time() - start_time
            start_time = time()
            
            # ignore too bright faces
            temp_boxes = []
            for box in boxes:
                x1, y1, x2, y2 = box
                hsv = cv2.cvtColor(frame[y1:y2,x1:x2,:], cv2.COLOR_BGR2HSV)
                brightness = cv2.mean(hsv)[2]
                if 75 < brightness < 225:
                    temp_boxes.append(box)
            boxes = temp_boxes
                    
            detector.debug(frame)
            labels = recog.recog(frame, boxes, threshold=0.0)
            execution_time['recognition'] = time() - start_time

            print(execution_time)

            # Post-processing the raw recognition data
            recog.put_to_result_buffer(boxes,labels)


            # show detected faces and recognized labels
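
The brightness gate above works because cv2.mean over an HSV image puts the average V (value) channel at index 2. As a standalone sketch, reusing the 75/225 cutoffs hardcoded above:

import cv2

def is_reasonably_lit(bgr_roi, lo=75, hi=225):
    # average V channel of the HSV conversion ~= average brightness
    hsv = cv2.cvtColor(bgr_roi, cv2.COLOR_BGR2HSV)
    return lo < cv2.mean(hsv)[2] < hi
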
Example #49
0
import cv2

imgRGB = cv2.imread("tampa-rgb.jpeg")
imgTonsDeCinza = cv2.imread("tampa-tons-de-cinza.jpeg", 0)

valorMedioRGB = cv2.mean(imgRGB)
valorMedioCinza = cv2.mean(imgTonsDeCinza)

print(valorMedioRGB)
print(valorMedioCinza)
Example #50
0
            ## Hardcoded Diameter Range in pixels, needs to be optimized after testing
            LOW_DIAMETER_BOUND = 20
            HIGH_DIAMETER_BOUND = 100
            # Original tolerances were 20 and 150

            if (equi_diameter > LOW_DIAMETER_BOUND
                    and equi_diameter < HIGH_DIAMETER_BOUND):
                mask = np.zeros(imgray.shape, np.uint8)
                cv2.drawContours(mask, [cntr], 0, 255, -1)
                pixelpoints = np.transpose(np.nonzero(mask))
                img_fg = cv2.bitwise_and(depth_frame.asarray(),
                                         depth_frame.asarray(),
                                         mask=mask)
                #img_fg = cv2.blur(img_fg,5)
                img_fg = cv2.medianBlur(img_fg, 5)
                mean_val = cv2.mean(img_fg)
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(img_fg)
                moment = cv2.moments(cntr)
                cx = int(moment['m10'] / moment['m00'])
                cy = int(moment['m01'] / moment['m00'])

                # Rule of 57 Attempt
                #mm_diameter = (1.0/57.0) * (equi_diameter/6.006) * max_val
                coords = registration.getPointXYZ(depth_frame, max_loc[0],
                                                  max_loc[1])

                mm_diameter = math.sin((equi_diameter / w) * FOVX) * max_val
                #(equi_diameter / w) * (2.0 * max_val * math.tan(FOVX / 2.0))  # ~FOV

                ellipse = cv2.fitEllipse(cntr)
                img = cv2.ellipse(img, ellipse, (255, 255, 255), 2)
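
The mm_diameter line is a small-angle pinhole estimate: the blob spans equi_diameter / w of the horizontal field of view, so its angular size is theta = (equi_diameter / w) * FOVX, and at range max_val its metric size is roughly max_val * theta. The code uses max_val * sin(theta), and the commented-out tan variant gives nearly the same answer for small angles, since sin(theta) ≈ theta ≈ tan(theta).
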
Example #51
0
	color=cv2.cvtColor(frame,cv2.COLOR_HSV2BGR)
	frame=hsv
	orig=frame
	frame=cv2.GaussianBlur(frame,(5,5), 5)
	rows=frame.shape[0]
	cols=frame.shape[1]
	cv2.rectangle(frame, (int(rows/3),int(cols/2.5)), (int(rows/3+SQ_SIZE),int(cols/2.5)+SQ_SIZE), (0,255,255))
	cv2.rectangle(frame, (int(rows/4.2),int(cols/2.9)), (int(rows/4.2+SQ_SIZE),int(cols/2.9)+SQ_SIZE), (0,255,255))
	cv2.rectangle(frame, (int(rows/2.7),int(cols/3.6)), (int(rows/2.7+SQ_SIZE),int(cols/3.6)+SQ_SIZE), (0,255,255))
	cv2.rectangle(frame, (int(rows/4),int(cols/2.3)), (int(rows/4+SQ_SIZE),int(cols/2.3)+SQ_SIZE), (0,255,255))
	cv2.rectangle(frame, (int(rows/2),int(cols/2.5)), (int(rows/2+SQ_SIZE),int(cols/2.5)+SQ_SIZE), (0,255,255))
	cv2.rectangle(frame, (int(rows/2.7),int(cols/8)), (int(rows/2.7+SQ_SIZE),int(cols/8)+SQ_SIZE), (0,255,255))

	roi_1=orig[int(cols/2.5):int(cols/2.5)+SQ_SIZE, int(rows/3):int(rows/3+SQ_SIZE)]
	if(reset):
		mean_1=cv2.mean(roi_1)
	TR_MIN = np.array([0, max(mean_1[1]-sat_sens,0), mean_1[2]-val_sens],np.uint8)
	TR_MAX = np.array([mean_1[0]+hue_sens, mean_1[1]+sat_sens, mean_1[2]+val_sens],np.uint8)
	tr_1=cv2.inRange(orig, TR_MIN, TR_MAX)

	roi_2=orig[int(cols/2.9):int(cols/2.9)+SQ_SIZE, int(rows/4.2):int(rows/4.2+SQ_SIZE)]
	if(reset):
		mean_2=cv2.mean(roi_2)
	TR_MIN = np.array([0, max(mean_2[1]-sat_sens,0), mean_2[2]-val_sens],np.uint8)
	TR_MAX = np.array([mean_2[0]+hue_sens, mean_2[1]+sat_sens, mean_2[2]+val_sens],np.uint8)
	tr_2=cv2.inRange(orig, TR_MIN, TR_MAX)


	roi_3=orig[int(cols/3.6):int(cols/3.6)+SQ_SIZE, int(rows/2.7):int(rows/2.7+SQ_SIZE)]
	if(reset):
		mean_3=cv2.mean(roi_3)
    vx, vy = flow[..., 0], flow[..., 1]
    mag, ang = cv2.cartToPolar(vx, vy)

    # visualize the motion vectors
    hsv = np.zeros((h2, w2, 3), dtype=np.uint8)
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 1] = 255
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    cv2.imshow('flow', bgr)

    # select regions with sufficiently large motion
    motion_mask = np.zeros((h2, w2), dtype=np.uint8)
    motion_mask[mag > 2.0] = 255  # drop spurious, mis-computed noise vectors

    mx = cv2.mean(vx, mask=motion_mask)[0]
    my = cv2.mean(vy, mask=motion_mask)[0]
    m_mag = math.sqrt(mx * mx + my * my)

    if m_mag > 4.0:  # only react to sufficiently large motion
        # atan2 returns an angle in (-pi, pi]
        m_ang = math.atan2(my, mx) * 180 / math.pi  # -180 ~ 180
        m_ang += 180  # 0 ~ 360

        pt1 = (100, 100)

        if m_ang >= 45 and m_ang < 135:
            pt2 = (100, 30)
        elif m_ang >= 135 and m_ang < 225:
            pt2 = (170, 100)
        elif m_ang >= 225 and m_ang < 315:
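
The branch ladder quantizes the mean flow direction into four 90-degree sectors: after the +180 shift, m_ang lies in [0, 360), the arms shown cover 45-135, 135-225 and 225-315, and the final branch (cut off here) would catch the remaining wrap-around sector, m_ang < 45 or m_ang >= 315.
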
size = (int(cap.get(cv.cv.CV_CAP_PROP_FRAME_WIDTH)),
        int(cap.get(cv.cv.CV_CAP_PROP_FRAME_HEIGHT)))
fps = cap.get(cv.cv.CV_CAP_PROP_FPS)
frames = int(cap.get(cv.cv.CV_CAP_PROP_FRAME_COUNT))
out = cv.VideoWriter(outpath, fourcc, fps, size)
if not out.isOpened():
    print("Failed to open writer for " + outpath)
    sys.exit(1)
ret, img = cap.read()
cap.release()

# Get color bounds
# Average colors
mask = np.zeros(img.shape[:2], np.uint8)
cv.circle(mask, (selected_x, selected_y), sampling_radius, (255, 255, 255), -1)
average_color = cv.mean(img, mask)
# Convert to HSV and get hue, we need it later
hue = cv.cvtColor(np.array([[average_color]], dtype=np.uint8),
                  cv.COLOR_BGR2HSV)[0][0][0]
# All we care about is the hue, sat and val will be fixed to wide range
sv_low = int(255 - (tolerance * 255))
dark = np.array([hue - 10, sv_low, sv_low])
light = np.array([hue + 10, 255, 255])

# Loop over all frames
cap = cv.VideoCapture(video_path)
i = 0
data = []

for i in range(1, frames + 1):
    # Read a frame
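
With dark and light fixed from the sampled average color, each frame reduces to a single inRange call in HSV, where moderate lighting changes mostly move S and V rather than H. A plausible sketch of the per-frame step the loop performs, assuming the dark/light bounds computed above:

ret, frame = cap.read()                     # next frame
hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)  # hue is stable under lighting shifts
mask = cv.inRange(hsv, dark, light)         # keep pixels within the hue band
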
Example #54
0
    cnts = sorted(cnts, key=cv.contourArea, reverse=True)
    sizes = []
    for cnt in cnts:
        mask = np.zeros((img.shape), np.uint8)
        mask.fill(255)
        mask = cv.drawContours(mask, [cnt], -1, 0, cv.FILLED)
        img2 = cv.bitwise_or(mask, img)
        rect = cv.minAreaRect(cnt)
        area = abs(cv.contourArea(cnt))
        x, y = rect[0]
        w, h = rect[1]
        dense = int(255 * area / w / h)
        prop = int(255 * h / w) if w > h else int(255 * w / h)
        sizes.append([prop, max(w, h)])
        out = getSubImage(rect, img2)
        b, g, r, _ = np.int0(cv.mean(out))
        print(prop, dense, b, g)
        params.append([prop, dense, b, g, i, out])

x = list(par[:4] for par in params)
kmeans = KMeans(n_clusters=70)
y_kmeans = kmeans.fit_predict(x)
print(y_kmeans)

for i in range(len(params)):
    prop, dense, b, g, numb, out = params[i]
    if not os.path.exists(str(y_kmeans[i])):
        os.makedirs(str(y_kmeans[i]))
    cv.imwrite(
        '%s\\%s_%s_%s_%s_card%s.bmp' % (y_kmeans[i], prop, dense, b, g, i),
        out)
Example #55
0
            lower, upper, minPoints, isFruit = boundary
            tmpMask = cv2.inRange(img, np.array(lower), np.array(upper))

            contours, hierarchy = cv2.findContours(tmpMask, cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)
            if (mask is None):
                mask = tmpMask
            else:
                mask = cv2.bitwise_or(mask, tmpMask)

            for c in contours:
                if (len(c) < minPoints):
                    # contour is too small, probably not a fruit/bomb
                    continue

                centerX, centerY, __1, __2 = map(int, cv2.mean(c))

                sumX, sumY = 0, 0
                rows = 70
                cols = 70
                cnt = 0

                # get center of current object
                for i in range(rows):
                    for j in range(cols):
                        x = int(centerX + i - rows / 2)
                        y = int(centerY + j - cols / 2)
                        if x < 0 or x >= width or y < 0 or y >= height:
                            continue
                        val = tmpMask[y, x]
                        if val:
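
A note on centerX, centerY, __1, __2 = map(int, cv2.mean(c)): a contour is an Nx1x2 array of points, so cv2.mean averages the x and y vertex coordinates (the last two of the four returned values are zero padding). That makes it a very cheap centroid estimate, though it is biased toward vertex-dense parts of the outline; cv2.moments gives the area-weighted centroid when accuracy matters.
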
Example #56
0
contmask = np.zeros(im.shape, np.uint8)

# Detect blobs.
keypoints = detector.detect(im)
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS makes the circle size match the
# blob size; the DEFAULT flag used below draws fixed-size markers instead
im_with_keypoints = cv2.drawKeypoints(contmask, keypoints, np.array([]),
                                      (0, 0, 255),
                                      cv2.DRAW_MATCHES_FLAGS_DEFAULT)

# Show keypoints
cv2.imshow("Keypoints", im_with_keypoints)
cv2.waitKey(0)
cv2.destroyAllWindows()

mean = cv2.mean(im, mask=mask)

rows, cols, colors = im.shape
rows = rows - 1
cols = cols - 1
cvfirst = 0
cvsecond = 0
imgrgb = []
k = 0
topdistance = 0
i = 0
for y in range(cols):
    for x in range(rows):
        if ((im_with_keypoints[x, y] != [0, 0, 0]).any()):
            coords = [y, x]
            imgrgb.append(coords)
Example #57
0
def means(m1, m2):  # print the mean of each image (using cv2.mean)
    print(cv2.mean(m1))

    print(cv2.mean(m2))
def find_coffee_amount():
    min = sys.maxsize
    fb = firebase.FirebaseApplication("https://blinding-heat-3035.firebaseio.com")
    query_rgb = 0
    empty_rgb = 0
    empty_files = glob.glob("empty_pot" + "/*.jpg")
    for filename in empty_files:
        im = Image.open(filename)
        pix = cv2.imread(filename)
        mean = cv2.mean(pix)
        empty_rgb += mean[0] + mean[1] + mean[2]
    empty_rgb /= len(empty_files)  # average the rgb sums over all empty-pot images
    query_files = glob.glob("average_pics" + "/*.jpg")
    for filename in query_files:
        im = Image.open(filename)
        pix = cv2.imread(filename)
        mean = cv2.mean(pix)
        query_rgb += mean[0] + mean[1] + mean[2]
    query_rgb /= len(query_files)  # average the rgb sums for the current pot
    query_rgb -= empty_rgb  # normalize by subtracting the empty-pot baseline
    print('query rgb ' + str(query_rgb))
    for filename in glob.glob("rgb_cropped"+"/*.jpg"):
        im = Image.open(filename)
        pix = cv2.imread(filename)
        mean = cv2.mean(pix)
        rgb_val = mean[0] + mean[1] + mean[2]
        rgb_val -= empty_rgb
        print(str(rgb_val) + ' ' + filename)
        if(abs(query_rgb - rgb_val) <= min): # compare rgb values of reference images with the current pot to check for similarity in amount of coffee in pot
            min = abs(query_rgb - rgb_val)
            min_file = filename
            most_sim = cv2.imread(filename)
    print(min_file)  # files are labeled based on the percentage of coffee in each pot
    min_file = os.path.splitext(min_file)[0]
    min_file = min_file.split('_',2)
    print(min_file)
    result = fb.put('/user','one', min_file[2]) #update server with current percentage of coffee in the pot
Example #59
-6
def get_contour_data(contour, image):
    data = {}
    data["empty"] = cv2.contourArea(contour)<=3
    data["convex"] = cv2.isContourConvex(contour)
    data["rect"] = cv2.boundingRect(contour)
    x,y,w,h = data["rect"]
    data["size"] = max(w,h)#float(w+h)/2
    data["radius"] = math.sqrt(w**2+h**2)/2.0
    data["points"] = [(x,y),(x,y+h),(x+w,y+h),(x+w,y)]
    M = cv2.moments(np.array([[p] for p in data["points"]],dtype=np.int32))
    data["center"] = (M['m10']/(M['m00']+0.00001), M['m01']/(M['m00']+0.00001))
    #if the contours are circles instead of squares
    if not doc_parameters["squares"]:
        center, radius = cv2.minEnclosingCircle(contour)
        data["center"] = (int(center[0]),int(center[1]))
        data["radius"] = int(radius)
        new_radius = int((1-doc_parameters["selection_circle_padding"])*radius)

        mask = np.zeros(image.shape,np.uint8)
        cv2.ellipse(mask, (int(center[0]),int(center[1])), (new_radius, new_radius), 0, 0, 360, 255, -1)
        data["mean_intensity"] = cv2.mean(image,mask = mask)[0]

    else:
        b = doc_parameters["selection_box_padding"]/2.0
        fillarea = np.array([ [[x+b*w,y+b*h]] , [[x+b*w,y+h-b*h]] , [[x+w-b*w,y+h-b*h]] , [[x+w-b*w,y+b*h]] ], dtype=np.int32 )
        mask = np.zeros(image.shape,np.uint8)
        cv2.drawContours(mask,[fillarea],0,255,-1)
        #improve the calculation of the intensity that decides if it is selected or not.
        data["mean_intensity"] = cv2.mean(image,mask = mask)[0]

    # if doc_parameters["debug"]: print data["mean_intensity"]


    return data
Example #60
-6
 def isblack(self, border=10, limit=150.0):
     if self._isblack is None:
         # TODO: Calculate 10%
         # cv2.mean(img, mask=mask)
         mean_top = np.average(cv2.mean(self._img[:border])) < limit
         mean_bot = np.average(cv2.mean(self._img[-border:])) < limit
         self._isblack = mean_top and mean_bot
     return self._isblack
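
A caveat worth knowing about isblack(): np.average is taken over the full 4-tuple from cv2.mean, which includes the zero-padded fourth channel, so for a 3-channel self._img the computed value is only three quarters of the true mean gray level (equivalently, the effective limit is inflated). Slicing off the padding avoids the bias:

mean_top = np.average(cv2.mean(self._img[:border])[:3]) < limit
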