Exemple #1
0
	def getSelectionStats(self, hsv_mask, rgb_image_in, depth_image_in):
		total_pixels_out_mask = float(cv2.countNonZero(self.select_mask))
		total_pixels_in_mask = float(cv2.countNonZero(self.inv_select_mask))
		print "total_pixels_in_mask, total_pixels_out_mask"
		print total_pixels_in_mask, total_pixels_out_mask

		# self.findBlobsofHue(hsv_mask, num_blobs, rgb_image_in)
		res, mask = self.applyHSVMask(hsv_mask, rgb_image_in, depth_image_in)

		self.select_img = cv2.bitwise_and(res, self.original_img, mask=self.select_mask)
		self.inv_select_img = cv2.bitwise_and(res, self.original_img, mask=self.inv_select_mask)

		img2gray = cv2.cvtColor(self.select_img,cv2.COLOR_BGR2GRAY)
		ret, outmask = cv2.threshold(img2gray, 0, 255, cv2.THRESH_BINARY)
		
		img2gray = cv2.cvtColor(self.inv_select_img,cv2.COLOR_BGR2GRAY)
		ret, inmask = cv2.threshold(img2gray, 0, 255, cv2.THRESH_BINARY)
		
		window_name = 'Auto Calibrate'
		cv2.imshow(window_name, inmask)
		# key = cv2.waitKey(0)


		curr_pixels_out_mask = cv2.countNonZero(outmask)/total_pixels_out_mask
		curr_pixels_in_mask = cv2.countNonZero(inmask)/total_pixels_in_mask
		# print "curr_pixels_in_mask, curr_pixels_out_mask"
		print curr_pixels_in_mask, curr_pixels_out_mask

		return curr_pixels_in_mask, curr_pixels_out_mask
Exemple #2
0
def sameCardShape(shape1, shape2):
    #val = cv2.matchShapes(shape1, shape2, 1, 0.0)
    #print val
    (maxX1, maxY1) = (max(shape1[:,0,0]), max(shape1[:,0,1]))
    (minX1, minY1) = (min(shape1[:,0,0]), min(shape1[:,0,1]))
    (maxX2, maxY2) = (max(shape2[:,0,0]), max(shape2[:,0,1]))
    (minX2, minY2) = (min(shape2[:,0,0]), min(shape2[:,0,1]))

    maxXDiff = max(maxX1 - minX1, maxX2 - minX2)
    maxYDiff = max(maxY1 - minY1, maxY2 - minY2)

    image1 = np.zeros((maxYDiff,maxXDiff), np.uint8)
    cv2.drawContours(image1, [shape1], 0, 255, offset=(-minX1, -minY1))
    image1 = cv2.dilate(image1, np.ones((17,17), np.uint8))
    if DEBUGSHAPES:
        showImage(image1, 'contour1', wait=False)

    image2 = np.zeros((maxYDiff,maxXDiff), np.uint8)
    cv2.drawContours(image2, [shape2], 0, 255, offset=(-minX2, -minY2))
    image2 = cv2.dilate(image2, np.ones((17,17), np.uint8))
    if DEBUGSHAPES:
        showImage(image2, 'contour2', wait=False)


    intersectImage = cv2.bitwise_and(image1, image2)
    intersectCount = float(cv2.countNonZero(intersectImage))
    count1 = cv2.countNonZero(image1)
    count2 = cv2.countNonZero(image2)
    if DEBUGSHAPES:
        print str(intersectCount / count1 > shape_similarity_threshold or \
                  intersectCount / count2 > shape_similarity_threshold) + \
            ' intersect ratio = ' + str(intersectCount/count1) + ', ' + str(intersectCount/count2)
        showImage(intersectImage, 'and')
    
    return intersectCount / min(count1, count2) > shape_similarity_threshold
    def toSkeleton(self, image):

        imageSize = np.size(image)
        skeleton  = np.zeros(image.shape, np.uint8)

        ret, image = cv2.threshold(image, 127, 255, 0)
        element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))

        done = False

        while not done:

            eroded   = cv2.erode     (image,    element)
            temp     = cv2.dilate    (eroded,   element)
            temp     = cv2.subtract  (image,    temp)
            skeleton = cv2.bitwise_or(skeleton, temp)
            image    = eroded.copy()

            print "Image Size:", imageSize
            print "CountNonZero:", cv2.countNonZero(image)

            zeros = imageSize - cv2.countNonZero(image)
            if zeros == imageSize:
                done = True

        return skeleton
Exemple #4
0
def checkCross(img_bin, cnt, points):
    x_cnt, y_cnt, w_cnt, h_cnt = cv2.boundingRect(cnt)
    #initial percentage
    blob = img_bin[y_cnt:(y_cnt+h_cnt), x_cnt:(x_cnt+w_cnt)]

    area = w_cnt*h_cnt
    colored = cv2.countNonZero(blob)
    if SEARCH_COLOR > 0:
        percentage = (np.float32(area)-np.float32(colored))/np.float32(area)
    else:
        percentage = (np.float32(colored))/np.float32(area)
    if percentage > 0.35:
        return False

    line_width = int(np.float32(w_cnt+h_cnt)/40.0)+1
    cv2.line(img_bin, (points[0][0], points[0][1]), (points[2][0], points[2][1]), SEARCH_COLOR, line_width)
    cv2.line(img_bin, (points[1][0], points[1][1]), (points[3][0], points[3][1]), SEARCH_COLOR, line_width)
    blob = img_bin[y_cnt:(y_cnt+h_cnt), x_cnt:(x_cnt+w_cnt)]
    coloredNew = cv2.countNonZero(blob)
    if SEARCH_COLOR > 0:
        percentageNew = (np.float32(area)-np.float32(coloredNew))/np.float32(area)
    else:
        percentageNew = (np.float32(coloredNew))/np.float32(area)
    if percentageNew > 0.2:
        return False
    else:
        return True
def dark_clouds(image_count):

	# LOAD THE IMAGE, CONVERT IT TO GRAYSCALE, AND BLUR IT
	# SLIGHTLY TO REMOVE HIGH FREQUENCY EDGES THAT WE AREN'T
	# INTERESTED IN
	img = cv2.imread("images/sky_region/ExtractedSky" + str(image_count) + ".jpg")
	original = img.copy()

	# FIND AND COUNT THE NUMBER OF BLACK PIXELS IN AN IMAGE
	BLACK = np.array([0,0,0],np.uint8)
	blackRange = cv2.inRange(img,BLACK,BLACK)
	no_black_pixels = cv2.countNonZero(blackRange)

	# CONVERT BGR TO HSV
	hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

	# DEFINE RANGE OF GREY COLOR IN HSV
	lower_grey = np.array([90,0,0], dtype=np.uint8)
	upper_grey = np.array([130,255,125], dtype=np.uint8)

	# THRESHOLD THE HSV IMAGE TO GET ONLY GREY COLORS
	mask = cv2.inRange(hsv, lower_grey, upper_grey)

	# COUNT NUMBER OF GREY PIXELS
	no_grey_pixels = cv2.countNonZero(mask)

	# BITWISE-AND MASK AND ORIGINAL IMAGE
	res = cv2.bitwise_and(original,original, mask= mask)

	'''cv2.imshow("masked grey sky",res)'''

	# GET THE TOTAL NUMBER OF PIXELS
	total_pixels = original.size / 3

	# GET THE NUMBER OF PIXELS IN THE SKY REGION OF AN IMAGE
	sky_region_pixels = total_pixels - no_black_pixels


	# CALCULATE THE PERCENTAGE OF THE COLOUR grey PRESENT IN THE SKY
	if no_grey_pixels == 0:
		return("There is no grey pixels in the image." )
	else:
		grey_percentage = (no_grey_pixels / sky_region_pixels) * 100

		'''print("The total number of pixels is: " + str(total_pixels))
		print("The number of grey pixels is: " + str(no_grey_pixels))
		print("The number of black pixels is: " + str(no_black_pixels))
		print("The number of pixels of the sky region is : " + str(sky_region_pixels))'''
		print("The percentage of grey in the sky region is : " + str(grey_percentage))

		if grey_percentage > 70:
			return("Severe stormy skies.\n" )
		elif grey_percentage > 50 and grey_percentage <= 70:
			return("Very Stormy skies.\n" )
		elif grey_percentage > 30 and grey_percentage <= 50:
			return("Some stormy skies.\n" )
		elif grey_percentage > 9 and grey_percentage <= 30:
			return("Scattered rain clouds.\n" )
		else:
			return("The sky is overcast.\n")
	def area_based_track(self, frame):
		# 3. count # of moved pixels to compute occupancy
		# 3a. subsampling
		box = conf.TARGET_AREA
		(H, W) = frame.shape[:2]
		if conf.SUBSAMPLING_SCALE > 1:
			frame = imutils.resize(frame, width=W/conf.SUBSAMPLING_SCALE)
			box = list(map(lambda (x,y): (x/conf.SUBSAMPLING_SCALE, y/conf.SUBSAMPLING_SCALE), box))
			(H, W) = frame.shape[:2]
		box_height = box[1][1] - box[0][1]
		box_width = box[1][0] - box[0][0]
		box_area = box_height * box_width

		# 3b. check sudden change
		if conf.CHECK_SUDDEN_CHANGE and cv2.countNonZero(frame) > (W*H) * conf.SUDDEN_CHANGE_RATIO:
			print("Sudden change detected")
			self.flush_objects()
			return []

		# 3c. Split frame into 3 parts and compute occupancy of each
		count_lst = []
		for i in range(3):
			y_pos = box[0][1] + box_height*i/3
			f = frame[y_pos:y_pos + box_height/3, box[0][0]:box[1][0]]
			count_lst.append(cv2.countNonZero(f))
		occ_lst = list(map(lambda c: (c / float(box_area/3)) > conf.OCCUPIED_RATIO, count_lst))

		#print(count_lst[0], count_lst[1], count_lst[2])
		#print(occ_lst)

		# 4. Check crossed objects
		self.cur_in = False
		self.cur_out = False
		if self.occ_lst is not None and self.iteration - self.last_report_iter > conf.MIN_REPORT_INTERVAL:
			if self.occ_lst[1] == True and occ_lst[1] == False:
				if any(occ_lst): 
					self.checkout_occ = (self.iteration, occ_lst, list(self.occ_history))
				else: # quick change -> Use previous frame
					self.checkout_occ = (self.iteration, list(self.occ_lst), list(self.occ_history))

		# After noisy frames
		if self.checkout_occ is not None and self.iteration - self.checkout_occ[0] > conf.MIN_NOISY_INTERVAL:
			oc = self.checkout_occ[1]
			oh = self.checkout_occ[2]
			#print("!!!", oc, oh)

			if oc[0] == True and self.occ_in_history(oh, 2):
				self.cur_in = True
			if oc[2] == True and self.occ_in_history(oh, 0):
				self.cur_out = True

			if self.cur_in or self.cur_out:
				self.last_report_iter = self.checkout_occ[0]
				#print("C", self.cur_in, self.cur_out)
			self.checkout_occ = None

		self.occ_lst = occ_lst
		self.occ_history.append(occ_lst)

		return []
Exemple #7
0
    def get_active_cell(self, image):
        
        # obtain motion between previous and current image
        current_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        delta = cv2.absdiff(self.previous_gray, current_gray)
        threshold_image = cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)[1]

        # set cell height and width
        height, width = threshold_image.shape[:2]
        cell_height = height/2
        cell_width = width/3
 
        # store motion level for each cell
        cells = np.array([0, 0, 0])
        cells[0] = cv2.countNonZero(threshold_image[cell_height:height, 0:cell_width])
        cells[1] = cv2.countNonZero(threshold_image[cell_height:height, cell_width:cell_width*2])
        cells[2] = cv2.countNonZero(threshold_image[cell_height:height, cell_width*2:width])
 
        # obtain the most active cell
        top_cell =  np.argmax(cells)
 
        # return the most active cell, if threshold met
        if(cells[top_cell] >= self.THRESHOLD):
            return top_cell
        else:
            return None
Exemple #8
0
def cropImage(src):
    ptX = ptY = 0
    roiWidth = roiHeight = 0

    for i in range(src.shape[1]):
        if(cv2.countNonZero(src[:,i])>0):
            ptX = i;
            roiWidth = src.shape[1]-ptX;
            break;
        
    for j in range(src.shape[0]):
        if(cv2.countNonZero(src[j,:])>0):
            ptY = j;
            roiHeight = src.shape[0]-ptY;
            break;
        
    img1 = src[ptY:ptY+roiHeight, ptX:ptX+roiWidth]
    for i in range(img1.shape[1]-1,-1,-1):
        if(cv2.countNonZero(img1[:,i])>0):
            roiWidth = i+1;
            break;
        
    for j in range(img1.shape[0]-1,-1,-1):   
        if(cv2.countNonZero(img1[j,:])>0):
            roiHeight = j+1;
            break;
    
    img2 = img1[0:roiHeight,0:roiWidth];
    return img2
	def detectBorder(image,interestL):
		ret={}

		imgHeight, imgWidth = image.shape[:2]
	
		if "y0" in interestL:
			for y in xrange(imgHeight):
				if cv2.countNonZero(image[y:y+1,:])>0:
					ret["y0"]=y
					break

		if "x0" in interestL:
			for x in xrange(imgWidth):
				if cv2.countNonZero(image[:,x:x+1])>0:
					ret["x0"]=x
					break

		if "x1" in interestL:
			for x in reversed(xrange(imgWidth)):
				if cv2.countNonZero(image[:,x:x+1])>0:
					ret["x1"]=x
					break

		if "y1" in interestL:
			for y in reversed(xrange(imgHeight)):
				if cv2.countNonZero(image[y:y+1,:])>0:
					ret["y1"]=y
					break

		return ret
Exemple #10
0
def segmentImage(imarray):
    height = imarray.shape[0]
    width = imarray.shape[1]
    totalSegPixels = height * width / 16.0
    k = 0
    ratioList = []
    print (height * width)
    for i in range(4):
        for j in range(4):
            x = i * width / 4
            y = j * height / 4
            newH = height / 4
            newW = width / 4
            cv2.rectangle(imarray, (x, y), (x + newW, y + newH), (0, 255, 0), 2)
            segment = imarray[y : y + newH, x : x + newW]
            cv2.imshow("segment" + str(k), segment)
            blackPixels = int(totalSegPixels) - int(cv2.countNonZero(segment))
            ratio = blackPixels * 1.0 / totalSegPixels * 1.0
            print (cv2.countNonZero(segment))
            print (str(k) + " has " + str(ratio))
            ratioList.append(ratio)
            k += 1

    letter = checkLetter(ratioList)
    print letter
    print "All segments done"
    return letter
Exemple #11
0
def cropRegions(img):
    global upperRegion
    global lowerRegion
    global upperPixCnt
    global lowerPixCnt
    upperRegion = img[1:357, 612:1324]                          # check markers.jpg for markers and calculations
    lowerRegion = img[358:712, 612:1324]
    upperPixCnt = cv2.countNonZero(upperRegion)
    lowerPixCnt = cv2.countNonZero(lowerRegion)
    def __markerRecognize(self, img_gray, possible_markers):
        final_markers = []

        bit_matrix = np.ndarray((5, 5), np.uint8)
        for i in range(0, len(possible_markers)):
            M = cv2.getPerspectiveTransform(possible_markers[i].m_corners, self.__m_marker_coords)
            marker_image = cv2.warpPerspective(img_gray, M, (MARKER_SIZE, MARKER_SIZE))
            _, marker_image = cv2.threshold(marker_image, 125, 255, cv2.THRESH_BINARY|cv2.THRESH_OTSU)

            flag = False
            for y in range(0, 7):
                inc = 6
                if(y==0 or y==6):
                    inc = 1
                cell_y = y * MARKER_CELL_SIZE

                for x in range(0, 7, inc):
                    cell_x = x * MARKER_CELL_SIZE
                    none_zero_count = cv2.countNonZero(marker_image[
                        cell_y:(cell_y + MARKER_CELL_SIZE), cell_x:(cell_x + MARKER_CELL_SIZE)
                    ])
                    if(none_zero_count > MARKER_CELL_SIZE * MARKER_CELL_SIZE / 4):
                        flag = True
                        break
                if(flag):
                    break
            if(flag):
                continue

            for y in range(0, 5):
                cell_y = (y + 1) * MARKER_CELL_SIZE

                for x in range(0, 5):
                    cell_x = (x + 1) * MARKER_CELL_SIZE
                    none_zero_count = cv2.countNonZero(marker_image[
                        cell_y:(cell_y + MARKER_CELL_SIZE), cell_x:(cell_x + MARKER_CELL_SIZE)
                    ])
                    if none_zero_count > MARKER_CELL_SIZE * MARKER_CELL_SIZE / 2:
                        bit_matrix[y, x] = 1
                    else:
                        bit_matrix[y, x] = 0

            good_marker = False
            for rotation_idx in range(0, 4):
                if self.__hammingDistance(bit_matrix) == 0:
                    good_marker = True
                    break
                bit_matrix = self.__bitMatrixRotate(bit_matrix)
            if not good_marker:
                continue

            final_marker = possible_markers[i]
            final_marker.m_id = self.__bitMatrixToId(bit_matrix)
            final_marker.m_corners = np.roll(final_marker.m_corners, -rotation_idx, axis=0)
            final_markers.append(final_marker)
        return final_markers
    def extract_patches_tumor(self, bounding_boxes):
        """
            Extract both, negative patches from Normal area and positive patches from Tumor area

            Save extracted patches to desk as .png image files

            :param bounding_boxes: list of bounding boxes corresponds to detected ROIs
            :return:
            
        """
        mag_factor = pow(2, self.level_used)

        print('No. of ROIs to extract patches from: %d' % len(bounding_boxes))

        for i, bounding_box in enumerate(bounding_boxes):
            b_x_start = int(bounding_box[0]) * mag_factor
            b_y_start = int(bounding_box[1]) * mag_factor
            b_x_end = (int(bounding_box[0]) + int(bounding_box[2])) * mag_factor
            b_y_end = (int(bounding_box[1]) + int(bounding_box[3])) * mag_factor
            X = np.random.random_integers(b_x_start, high=b_x_end, size=500)
            Y = np.random.random_integers(b_y_start, high=b_y_end, size=500)
            # X = np.arange(b_x_start, b_x_end-256, 5)
            # Y = np.arange(b_y_start, b_y_end-256, 5)

            for x, y in zip(X, Y):
                patch = self.wsi_image.read_region((x, y), 0, (PATCH_SIZE, PATCH_SIZE))
                mask = self.mask_image.read_region((x, y), 0, (PATCH_SIZE, PATCH_SIZE))
                mask_gt = np.array(mask)
                # mask_gt = cv2.cvtColor(mask_gt, cv2.COLOR_BGR2GRAY)
                mask_gt = cv2.cvtColor(mask_gt, cv2.COLOR_BGR2GRAY)
                patch_array = np.array(patch)

                white_pixel_cnt_gt = cv2.countNonZero(mask_gt)

                if white_pixel_cnt_gt == 0:  # mask_gt does not contain tumor area
                    patch_hsv = cv2.cvtColor(patch_array, cv2.COLOR_BGR2HSV)
                    lower_red = np.array([20, 20, 20])
                    upper_red = np.array([200, 200, 200])
                    mask_patch = cv2.inRange(patch_hsv, lower_red, upper_red)
                    white_pixel_cnt = cv2.countNonZero(mask_patch)

                    if white_pixel_cnt > ((PATCH_SIZE * PATCH_SIZE) * 0.50):
                        # mask = Image.fromarray(mask)
                        patch.save(PROCESSED_PATCHES_TUMOR_NEGATIVE_PATH + PATCH_NORMAL_PREFIX +
                                   str(self.negative_patch_index), 'PNG')
                        # mask.save(PROCESSED_PATCHES_NORMAL_PATH + PATCH_NORMAL_PREFIX + str(self.patch_index),
                        #           'PNG')
                        self.negative_patch_index += 1
                else:  # mask_gt contains tumor area
                    if white_pixel_cnt_gt >= ((PATCH_SIZE * PATCH_SIZE) * 0.85):
                        patch.save(PROCESSED_PATCHES_POSITIVE_PATH + PATCH_TUMOR_PREFIX +
                                   str(self.positive_patch_index), 'PNG')
                        self.positive_patch_index += 1

                patch.close()
                mask.close()
	def checkForTennisBall(self, img):
		imgPlain =  img.copy()

		# Convert image to HSV
		img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
		gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

		# define the uppper and lower bounds for the hsv mask
		lower = np.array([29, 120, 120], dtype= "uint8")
		upper =  np.array([33, 255, 255], dtype= "uint8")

		mask = cv2.inRange(img, lower, upper)


		# apply a series of erosions and dilations to the mask
		# using an elliptical kernel
		# the key here is that we need to erode and dialate at different rates. 
		# erode = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
		# dilate = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8, 8))
		# mask = cv2.erode(mask, erode, iterations = 2)
		# mask = cv2.dilate(mask, dilate, iterations = 2)


		mask = cv2.erode(mask, None, iterations = 2)
		mask = cv2.dilate(mask, None, iterations = 2)


		print "size = " + str(cv2.countNonZero(mask))

		# circles = cv2.CreateMat(grayImg.width, 1, cv.CV_32FC3)
		circles = cv2.HoughCircles(mask, cv2.cv.CV_HOUGH_GRADIENT, 1.2, 100)


		 
		# finalImage, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
		contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
		cv2.drawContours(imgPlain, contours, -1, (255,255,0), 3)
 
		if len(contours) >0:
			c = max(contours, key=cv2.contourArea)
			((x,y), radius) = cv2.minEnclosingCircle(c)

			M= cv2.moments(c)
			center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

			if radius > 10:
				cv2.circle(imgPlain, (int(x), int(y)), int(radius), (0,255,255),2)
				cv2.circle(imgPlain, center, 5, (0, 0, 255), -1)

		if (cv2.countNonZero(mask) > 2000):
			logImages.logTennis(imgPlain)
			return True
		else:
			return False
    def feed(self, image):
        if self.backImg == None:
            self.backImg = image.copy()
            self.prevImg = image.copy()
            self.height, self.width = image.shape
            self.bestRun = np.ones((self.height,self.width), np.uint8)
            self.currentRun = np.ones((self.height,self.width), np.uint8)
            self.minFixedPixel = int(image.size*self.PERCENTAGE)
            #print self.minFixedPixel
            if self.showBackImage:
                self.createTrackbars()
                cv2.imshow("backImage", self.backImg)
            return self.backImg

        self.checkSettings()
        
        diffImage = cv2.absdiff(self.prevImg,image)
        ret, threshold1 = cv2.threshold(diffImage, self.THRESHOLD, 1, cv2.THRESH_BINARY_INV)
        ret, threshold255 = cv2.threshold(diffImage, self.THRESHOLD, 255, cv2.THRESH_BINARY_INV)
        
        nonZero = cv2.countNonZero(threshold1)
        nonZeroRatio = nonZero / float(image.size)
        perfection = self.PERFECTION
        if nonZeroRatio < self.PERCENTAGE:
            perfection = 5
            print perfection
        
        nonChanged = cv2.bitwise_and(self.currentRun, threshold255)
        self.currentRun = cv2.add(threshold1,nonChanged)

        newBestsMask = cv2.compare(self.currentRun, self.bestRun, cv2.CMP_GE)
        oldBestsMask = cv2.compare(self.currentRun, self.bestRun, cv2.CMP_LT)

        newBestRuns = cv2.bitwise_and(self.currentRun, self.currentRun, mask = newBestsMask)
        oldBestRuns = cv2.bitwise_and(self.bestRun, self.bestRun, mask = oldBestsMask)
        self.bestRun = cv2.add(newBestRuns, oldBestRuns)

        newBackImgPoints = cv2.bitwise_and(image, image,mask = newBestsMask)
        oldBackImgPoints = cv2.bitwise_and(self.backImg, self.backImg, mask = oldBestsMask)
        self.backImg = cv2.add(newBackImgPoints, oldBackImgPoints)

        stablePoints = cv2.compare(self.bestRun, perfection, cv2.CMP_GT)
        unstablePoints = cv2.bitwise_not(stablePoints)
        stablePoints = cv2.bitwise_and(stablePoints, perfection)
        unstablePoints = cv2.bitwise_and(unstablePoints, self.bestRun)
        self.bestRun = cv2.add(stablePoints, unstablePoints)
        
        self.nonZeroPoints = cv2.countNonZero(stablePoints)
        self.prevImg = image.copy()

        if self.showBackImage:
            cv2.imshow("backImage", self.backImg)
            
        return self.backImg
def find_file_Crop_Region(aFile):
    global gCropMargin
    src = cv2.imread(aFile,0);
    if src is None:
        print("ERROR:Fail to read image %s"%(aFile))
        return (0,0,0,0)

    image_row, image_col=src.shape

    src_blur = cv2.medianBlur(src, 3)

    gray_min = src_blur.min()
    gray_max = src_blur.max()
    crop_threshold = gray_min + (gray_max-gray_min)/10

    print("gray_min,gray_max=%s,%s"%(gray_min,gray_max))

    ret, gray_bin = cv2.threshold(src_blur, crop_threshold, 255, cv2.THRESH_BINARY)

    left, top, right, bot=0,0,0,0

    for j in range(image_col):
        col = gray_bin[:,j]
        cnt = cv2.countNonZero(col)
        if cnt > 0:
            left = j
            break;
    for j in range(image_col)[::-1]:
        col = gray_bin[:,j]
        cnt = cv2.countNonZero(col)
        if cnt > 0:
            right = j
            break;
    for i in range(image_row):
        row = gray_bin[i,:]
        cnt = cv2.countNonZero(row)
        if cnt > 0:
            top = i
            break;
    for i in range(image_row)[::-1]:
        row = gray_bin[i,:]
        cnt = cv2.countNonZero(row)
        if cnt > 0:
            bot = i
            break;

    if (left, top, right, bot) != (0,0,0,0):
        sx = (left - gCropMargin >0) and (left - gCropMargin) or 0
        sy = (top - gCropMargin >0) and (top - gCropMargin) or 0
        ex = (right + gCropMargin >image_col-1) and (image_col-1) or (right + gCropMargin)
        ey = (bot + gCropMargin >image_row-1) and (image_row-1) or (bot + gCropMargin)

    return sx, sy, ex, ey
Exemple #17
0
 def discover_light(self, value_img):
     dummy, value_240 = cv2.threshold(value_img, 240, 255, cv2.THRESH_BINARY)
     dummy, value_thr = cv2.threshold(value_img, self.thr, 255, cv2.THRESH_BINARY)
     no_white_240 = cv2.countNonZero(value_240)
     no_white_thr = cv2.countNonZero(value_thr)
     cnts, hier = cv2.findContours(value_240, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
     rects = [cv2.boundingRect(cnt) for cnt in cnts if cv2.contourArea(cnt) > 20000]
     if no_white_thr*0.4 <= no_white_240 and len(rects) > 0:
         self.light = "Day"
         self.thr = 240
     else:
         self.light = "Night"
Exemple #18
0
    def verifySizes(self, img):
        """
        Verify character size
        Called during segmentation on order to check if the given segment
        follows certain propreties common to characters. If it does, returns
        true and the input is considered a character. Otherwise returns false.

        @params
            img: numpy array (image)

        @return
            boolean
        """

        aspect = 33.0/60.0
        error = 0.25
        charAspect = float(img.shape[1])/float(img.shape[0])

        minHeight = 18.0
        maxHeight = 45.0

        minAspect = 0.15
        maxAspect = aspect + (aspect*error)

        if self.debug:
            print(str(cv2.countNonZero(img)))

        area = cv2.countNonZero(img)
        bbArea = img.shape[0]*img.shape[1]

        percPixels = float(area)/float(bbArea)

        if self.debug:
            print('verify sizes')
            print('aspect: ' + str(aspect))
            print('min/max aspect: ' + str(minAspect) + ',' + str(maxAspect) + ' state: ' + str((charAspect > minAspect)) + '/' + str((charAspect < maxAspect)))
            print('area: ' + str(percPixels) + ' state: ' + str((percPixels < 0.8) or (percPixels > 0.95)))
            print('char aspect: ' + str(charAspect))
            print('char height: ' + str(img.shape[0]) + ' state: min = ' + str((img.shape[0] >= minHeight)) + '/max = ' + str((img.shape[0] < maxHeight)))

        if ((percPixels < 0.8) or (percPixels > 0.95)) and (charAspect > minAspect) and (charAspect < maxAspect) and (img.shape[0] >= minHeight) and (img.shape[0] < maxHeight):
            if self.debug:
                print('True')
                print('----------------')
                print('\n')
            return True
        else:
            if self.debug:
                print('False')
                print('----------------')
                print('\n')
            return False
Exemple #19
0
 def lumFilter1(self, src, featuresToHold=1):
     img1 = self.origFilter(src)
     img2 = self.densityConnector(img1,0.9)
     featureVec = self.liquidFeatureExtraction(img2)
 
     #remove features clinging to image boundary
     m=0
     while(m<len(featureVec)):
         edges = np.zeros(img2.shape,np.uint8)
         contour = self.findBoundary(featureVec[m])
         cv2.drawContours(edges,contour,-1,(255))
         count = self.countEdgeTouching(edges,10,15)
         total = cv2.countNonZero(edges)
         percent = float(count)/total
         featurePixCount = cv2.countNonZero(featureVec[m])
         imagePixCount = cv2.countNonZero(img2)
         percentOfImage = float(featurePixCount)/imagePixCount
         #printf("%d/%d: %f, %f\n",count,total,percent,percentOfImage);
         #imgshow(edges);
         if(percent>=0.47 and percentOfImage<0.40):
             del featureVec[m:]
             featureVec.erase(featureVec.begin()+m)
         else:
             m+=1
 
     countPix = 0
     countVec = []
     for i in range(0,len(featureVec)):
         countPix = cv2.countNonZero(featureVec[i])
         countVec.append(countPix)
     countVec, idxVec = jaysort.jaysort(countVec)
     matVec = []
     element = cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
     n=1
     while(True):
         try:
             result = cv2.morphologyEx(featureVec[idxVec[len(idxVec)-n]],cv2.MORPH_CLOSE,element)
             matVec.append(np.copy(result))
             #imwrite("img"+toString(n)+".png",matVec.at(matVec.size()-1));
             n+=1
             if(len(matVec)>=featuresToHold):
                 break
             if(n>len(idxVec)):
                 break
         except Exception:
             traceback.print_exc()
             print("Catch #1: ShapeMorph::lumFilter1() out of range!")
             print("n: {}".format(n))
             print("featureVec.size() = {}".format(len(featureVec)))
             print("idxVec.size() = {}".format(len(idxVec)))
             exit(1)
     return matVec
Exemple #20
0
def CalculateData(result,ref,index):
    global output

    dataArray=[cv2.countNonZero(result),cv2.countNonZero(ref),cv2.countNonZero(cv2.bitwise_and(result,ref)),cv2.countNonZero(cv2.bitwise_and(result,cv2.bitwise_not(ref))),cv2.countNonZero(cv2.bitwise_and(ref,cv2.bitwise_not(result))),cv2.countNonZero(cv2.bitwise_and(cv2.bitwise_not(result),cv2.bitwise_not(ref)))] 
    p=(dataArray[2]/float(dataArray[2]+dataArray[3]))
    print "P is %f" % p
    output[count-1][index*3+0]=p
    tfp=(dataArray[3]/float (dataArray[3]+dataArray[5]))
    print "TFP is %f" % tfp
    output[count-1][index*3+1]=tfp      
    tfn=(dataArray[4]/float (dataArray[2]+dataArray[4]))
    print "TFN is %f" % tfn
    output[count-1][index*3+2]=tfn    
def remove_unwanted(mask,image):
	_,contours, hierarchy = cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE);	
	mask1 = np.zeros(image[:,:,0].shape,np.uint8);		
	for cnt in contours:
			mask = np.zeros(image[:,:,0].shape,np.uint8);		
			cv2.drawContours(mask,[cnt],0,255,-1);
			area = cv2.contourArea(cnt);
			im = cv2.bitwise_and(image,image,mask=mask);
			_,grad = remove_abundant(image,mask);
			if(cv2.countNonZero(grad)/area>0.5):
				print cv2.countNonZero(grad)/area;
				cv2.drawContours(mask1,[cnt],0,255,-1);						
	return(mask1); 
Exemple #22
0
def getFillPct(image, contour):
    mask = np.zeros((image.shape[0], image.shape[1]), np.uint8)
    cv2.drawContours(mask, [contour], 0, 255, thickness=-1)

    bwImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    bwImage = cv2.adaptiveThreshold(bwImage, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 111, 2)
    intersectImage = cv2.bitwise_and(mask, bwImage)

    if DEBUGFILL:
        showImage(image, wait=False)
        showImage(bwImage, "bw2", wait=False)
        showImage(intersectImage, "int")
    return float(cv2.countNonZero(intersectImage)) / cv2.countNonZero(mask)
Exemple #23
0
 def process(self):
     self.total_pixels = self.img.size / self.img.ndim
     self.hsv = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)
     self.blue_pixels = cv2.inRange(self.hsv, self.LOWER_BLUE, self.UPPER_BLUE)
     self.blue_pixels_count = cv2.countNonZero(self.blue_pixels)
     self.blue_fraction = self.blue_pixels_count / self.total_pixels
     self.grey_pixels = cv2.inRange(self.hsv, self.LOWER_GREY, self.UPPER_GREY)
     self.grey_pixels_count = cv2.countNonZero(self.grey_pixels)
     self.grey_fraction = self.grey_pixels_count / self.total_pixels
     self.saturation['mean'] = self.hsv[:, :, 1].ravel().mean() / 255
     self.saturation['var'] = self.hsv[:, :, 1].ravel().var()
     self.brightness['mean'] = self.hsv[:, :, 2].ravel().mean() / 255
     self.brightness['var'] = self.hsv[:, :, 2].ravel().var()
Exemple #24
0
 def distinguish(img, boxes):
     dy = 10
     dx = 10
     if len(boxes) == 0:
         return None
     if len(boxes) == 1:
         return boxes[0]
     if len(boxes) == 2:
         x1,y1,x2,y2 = boxes[0]
         yp = int(1/float(2)*(y2-y1))
         xp = int(1/float(2)*(x2-x1))
         roi1 = img[y1:y2, x1:x2]
         gray1 = cv2.cvtColor(roi1, cv2.COLOR_BGR2GRAY)
         dummy, gray1 = cv2.threshold(gray1, 100, 255, cv2.THRESH_BINARY)
         #contours
         contours, hier = cv2.findContours(gray1, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
         cnts = [cnt for cnt in contours if 50 < cv2.contourArea(cnt)]
         print len(cnts), "hand"
         l1 = len(cnts)
         color1 = cv2.cvtColor(gray1, cv2.COLOR_GRAY2BGR)
         cv2.drawContours(color1,cnts,-1,(0,255,0),1)
         cv2.imshow('hand', color1)
         #ratio
         gray1_piece = gray1[yp:yp+dy,xp:xp+dy]
         gray1_all = (x2-x1)*dy
         gray1_white = cv2.countNonZero(gray1_piece)
         ratio1 = gray1_white/float(gray1_all)
         
         x1,y1,x2,y2 = boxes[1]
         yp = int(1/float(8)*(y2-y1))
         xp = int(1/float(2)*(x2-x1))
         roi2 = img[y1:y2, x1:x2]
         gray2 = cv2.cvtColor(roi2, cv2.COLOR_BGR2GRAY)
         dummy, gray2 = cv2.threshold(gray2, 100, 255, cv2.THRESH_BINARY)
         #contours
         contours, hier = cv2.findContours(gray2, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
         cnts = [cnt for cnt in contours if 50 < cv2.contourArea(cnt)]
         print len(cnts), "face"
         l2 = len(cnts)
         color = cv2.cvtColor(gray2, cv2.COLOR_GRAY2BGR)
         cv2.drawContours(color,contours,-1,(0,255,0),1)
         #ratio
         gray2_piece = gray2[yp:yp+dy,xp:xp+dy]
         cv2.imshow('face', color)
         gray2_all = (x2-x1)*dy
         gray2_white = cv2.countNonZero(gray2_piece)
         ratio2 = gray2_white/float(gray2_all)
         
         if l1 < l2:
             return boxes[0]
         return boxes[1]
def skeletonization(img):
    '''
    http://opencvpython.blogspot.ru/2012/05/skeletonization-using-opencv-python.html
    '''
    img = img.copy()
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    size = np.size(img)
    skel = np.zeros(img.shape, np.uint8)

    # ret, img = cv2.threshold(img, 127, 255, 0)
    img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 7, 2)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))

    while True:
        eroded = cv2.erode(img, element)
        temp = cv2.dilate(eroded, element)
        temp = cv2.subtract(img, temp)
        skel = cv2.bitwise_or(skel, temp)
        img = eroded.copy()

        zeros = size - cv2.countNonZero(img)
        if zeros == size:
            break

    cv2.imwrite("skel.png", skel)
    return skel
Exemple #26
0
def skeletonize(image, size, structuring=cv2.MORPH_RECT):
    # determine the area (i.e. total number of pixels in the image),
    # initialize the output skeletonized image, and construct the
    # morphological structuring element
    area = image.shape[0] * image.shape[1]
    skeleton = np.zeros(image.shape, dtype="uint8")
    elem = cv2.getStructuringElement(structuring, size)

    # keep looping until the erosions remove all pixels from the
    # image
    while True:
        # erode and dilate the image using the structuring element
        eroded = cv2.erode(image, elem)
        temp = cv2.dilate(eroded, elem)

        # subtract the temporary image from the original, eroded
        # image, then take the bitwise 'or' between the skeleton
        # and the temporary image
        temp = cv2.subtract(image, temp)
        skeleton = cv2.bitwise_or(skeleton, temp)
        image = eroded.copy()

        # if there are no more 'white' pixels in the image, then
        # break from the loop
        if area == area - cv2.countNonZero(image):
            break

    # return the skeletonized image
    return skeleton
Exemple #27
0
    def action():
        global has_previous_diff

        if has_previous_diff:
            has_previous_diff = False
            face = find_face(images[2])
            if len(face) > 0:
                point = face[0][0:2] + face[0][2:4]
                print point
                px = (point[0] - (IMAGE_W / 2)) / 100
                py = (point[1] - (IMAGE_H / 2)) / 100
                rotate(px, py)

            return

        diff = get_diff()
        if diff is False:
            return

        diff_rate = cv2.countNonZero(diff)
        # print "diff_rate: {0}".format(diff_rate)
        if diff_rate > IMAGE_W * IMAGE_H * 0.01:
            # has_previous_diff = True

            angle_deg = get_motion_angle(diff)
            print "angle: {0}".format(angle_deg)
            if angle_deg and angle_deg != 0.0:
                angle_rad = np.radians(angle_deg)
                px = np.sin(angle_rad) / 2
                py = np.cos(angle_rad)
                print "x: {0}, y: {1}".format(px, py)
                rotate(px, py)

                return
 def train(self):
     ustep = self.range_dist[0]/self.hist_bins[0]
     vstep = self.range_dist[1]/self.hist_bins[1]
     #calc n, X_i and mu
     mu=np.array((1,2))
     n = cv2.countNonZero(self.f_hist);
     count = 0;
     N = 0;
     X=np.array((n,2))
     f=[]
     for ubin in self.hist_bins[0]:
         for vbin in self.hist_bins[1]:
             histval = self.f_hist[ubin][vbin];
             if histval > 0:
                 sampleX = ((1,2) << self.low_range[0] + self.ustep * (ubin+.5), self.low_range[1] + vstep * (vbin+.5))
                 count=count+1
                 sampleX=X.row[count]
                 f.append(histval)
                 mu = mu+ histval * sampleX;
                 N += histval
    
     mu /= N;
     #calc psi - mean of DB
     self.psi=cv2.reduce(X, self.psi,0, cv.CV_REDUCE_AVG)
     #calc Lambda
     self.Lambda=np.zeros((2,2),np.uint)
     for i in n:
         X_m_mu = (X.row[i] - mu)
         prod = f[i] * X_m_mu.t() * X_m_mu
         self.Lambda += prod;
     self.Lambda /= N;
     linv = self.Lambda.inv();
     self.Lambda_inv.val[0] = linv[0][0]
     self.Lambda_inv.val[1] = linv[0][1]
     self.Lambda_inv.val[2] = linv[1][0]
Exemple #29
0
 def calculateRoiPixels(self): 
     roi = cv2.imread(self.croppedImage)
     allPixelMinimum = np.array([0,0,0], np.uint8)
     allPixelMaximum = np.array([255,255,255], np.uint8)
     dstAll = cv2.inRange(roi, allPixelMinimum, allPixelMaximum)
     pixels = cv2.countNonZero(dstAll)
     return pixels 
def imgToMove(msg):
    #Convert it to an OpenCV image
    bridge = cv_bridge.CvBridge()
    cvImg = np.array(bridge.imgmsg_to_cv(msg, "bgr8"), dtype=np.uint8)
    #Threshold it, parameters from ImageSlicer.cpp
    hsvImg = cv2.cvtColor(cvImg, cv2.cv.CV_BGR2HSV)
    H,S,V = cv2.split(hsvImg)
    H = cv2.threshold(H, 165, 65536, cv2.cv.CV_THRESH_BINARY)
    S = cv2.threshold(S, 45, 65536, cv2.cv.CV_THRESH_BINARY)
    out = cv2.bitwise_and(H[1], S[1])

    #Slice it and count white pixels
    slices = 5
    counts = []
    regionWidth = out.shape[1]/slices
    for ii in range(slices):
        roi = out[0:out.shape[0],ii*regionWidth:(ii*regionWidth)+regionWidth]
        counts.append(cv2.countNonZero(roi))
        
    #Decide on a move
    bestMove = "N"
    leftCount = rightCount = 0
    for ii in range(3):
        leftCount += counts[ii]
        rightCount += counts[4-ii]
    if abs(leftCount - rightCount) > 500:
        #Enough difference, we move
        if leftCount > rightCount:
            bestMove = "L"
        else:
            bestMove = "R"
    else:
        #Not different enough, no move
        bestMove = "N"
    return bestMove
Exemple #31
0
    def detectMark(self, img):
        self.file = open('data.txt', 'w+')
        #dt = self.DetectQuestions(img)
        self.questions = contours.sort_contours(self.questions,
                                                method="top-to-bottom")[0]
        res = []
        bubbled = None
        self.alt = int(input('Number of alternatives per question: '))
        self.num_col = int(input('Number of gabarito columns: '))
        for (q, i) in enumerate(np.arange(0, len(self.questions), self.alt)):
            self.cont = 0
            cnts = contours.sort_contours(self.questions[i:i + self.alt])[0]
            self.questionsCnts += 1
            for (j, c) in enumerate(cnts):
                mask = np.zeros(self.thresh.shape, dtype="uint8")
                cv2.drawContours(mask, [c], -1, (255, 255, 255), -1)
                mask = cv2.bitwise_and(self.thresh, self.thresh, mask=mask)
                numPixels = cv2.countNonZero(mask)
                # print('pixel de cada questao:',numPixels)
                if numPixels >= len(self.thresh) * 0.7:
                    # print('MARK', cont)
                    res.append(c)
                    self.cont += 1
                    bubbled = (numPixels, j)
                    cv2.drawContours(img, res, -1, (0, 255, 0), 2)
                    if self.cont == 1:
                        if j == 0:
                            j = 'A'
                        if j == 1:
                            j = 'B'
                        if j == 2:
                            j = 'C'
                        if j == 3:
                            j = 'D'
                        if j == 4:
                            j = 'E'
                        self.id = j
            if self.cont > 2:
                self.id = 'nulo'
            elif self.cont == 0:
                self.id = 'white'
            #f.write('{} {}\n'.format(self.questionsCnts, self.id))
            #print('Questao', self.questionsCnts, ':', self.id)
            self.file.write('{}: {}\n'.format(self.questionsCnts, self.id))
        self.file = "data.txt"
        self.dict1 = {}
        with open(self.file) as fh:
            for line in fh:
                command, description = line.strip().split(None, 1)

                self.dict1[command] = description.strip()
        self.totalQuestions = self.questionsCnts
        self.Rows = int(self.questionsCnts / self.num_col)
        self.Cols = int(self.alt * (self.num_col * self.gab))
        self.dict = {
            "Gab": self.gab,  #num gabaritos
            "Number Cols": self.num_col,
            "Rows": self.Rows,
            "Cols": self.Cols,
            "Questions": self.totQuestions,
            "Mark": self.dict1  #questoes e respostas
        }
        print('Modelo', self.dict)
        return self.dict
Exemple #32
0
def check_red_circles(image):
    #blurred = cv2.GaussianBlur (image, (11, 11), 0)
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # construct a mask for the color "green", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    # lower mask (0-10)
    #mask0 = cv2.inRange (hsv, lower_white, upper_white)
    #mask0 = cv2.inRange (hsv, lower_col1, upper_col1)
    # upper mask (170-180)
    mask1 = cv2.inRange(hsv, lower_col2, upper_col2)
    # join my masks
    #cmask = mask0 + mask1
    cmask = mask1
    #
    #cmask = cv2.erode (cmask, None, iterations=2)
    #cmask = cv2.dilate (cmask, None, iterations=2)
    #iname = "./raw/mask-{}.png".format (datetime.now().strftime("%Y%m%d-%H%M%S-%f"))
    #cv2.imwrite (iname, cmask)
    #detect circles
    #"""
    circles = cv2.HoughCircles(cmask,
                               cv2.HOUGH_GRADIENT,
                               1,
                               60,
                               param1=100,
                               param2=20,
                               minRadius=c_r_min,
                               maxRadius=c_r_max)
    #            60, param1=100, param2=20, minRadius=c_r_min, maxRadius=c_r_max)
    #process circles
    c_x = 0
    c_y = 0
    c_r = 0
    if circles is not None:
        #iname = "./raw/image-{}.png".format (datetime.now().strftime("%Y%m%d-%H%M%S-%f"))
        #cv2.imwrite (iname, image)
        #print ("#i:saving frame {}".format (iname))
        #circles = np.uint16 (np.around (circles))
        kTS = "{}".format(datetime.now().strftime("%Y%m%d-%H%M%S-%f"))
        for i in circles[0, :]:
            c_x = int(i[0])
            c_y = int(i[1])
            c_r = int(i[2]) - 4  #autocrop the 'red' circle
            #print("#i:detected circle {}x{}r{}".format(c_x, c_y, c_r))
            if c_x > c_r and c_y > c_r:
                #crop the image area containing the circle
                tsr_img = image.copy()
                tsr_img = tsr_img[c_y - c_r:c_y + c_r, c_x - c_r:c_x + c_r]
                #
                #print("#i:circle size {} {}x{}r{}".format (tsr_img.shape, c_x, c_y, c_r))
                if tsr_img.shape[0] == tsr_img.shape[1]:
                    # draw mask
                    mask = np.full((c_r * 2, c_r * 2), 0,
                                   dtype=np.uint8)  # mask is only
                    cv2.circle(mask, (c_r, c_r), c_r, (255, 255, 255), -1)
                    # get first masked value (foreground)
                    fg = cv2.bitwise_or(tsr_img, tsr_img, mask=mask)
                    # get second masked value (background) mask must be inverted
                    mask = cv2.bitwise_not(mask)
                    bg = np.full(tsr_img.shape, 255, dtype=np.uint8)
                    bk = cv2.bitwise_or(bg, bg, mask=mask)
                    # combine foreground+background
                    final = cv2.bitwise_or(fg, bk)
                    gray = cv2.cvtColor(final, cv2.COLOR_BGR2GRAY)
                    #gray = tsr_img
                    ret, gray = cv2.threshold(gray, b_th, 255,
                                              cv2.THRESH_BINARY)
                    #cv2.imwrite (iname, gray)
                    wpk = cv2.countNonZero(gray)
                    tpk = gray.shape[0] * gray.shape[1]
                    if wpk > tpk * 70 / 100 and wpk < tpk * 80 / 100:
                        #print ("#i:white pixels {} in {}".format(wpk, tpk))
                        global kFot
                        kFot = kFot + 1
                        #iname = "./raw/ori-image-{}_{}.png".format (kTS, kFot)
                        #cv2.imwrite (iname, final)
                        #iname = "./raw/thd-image-{}_{}.png".format (kTS, kFot)
                        #print ("#i:saved {}".format (iname))
                        #cv2.imwrite (iname, gray)
                        # send to OCR engine for interpretation
                        tsrfocr.save(gray)
                        #tsrfocr.save (tsr_img)
                    #iname = "./raw/thd-gray-{}_{}.png".format (kTS, kFot)
            # draw the outer circle
            cv2.circle(image, (c_x, c_y), c_r, (0, 0, 255), 2)
Exemple #33
0
    scale = 1
    m = cv2.getRotationMatrix2D(center, angle, scale)
    frame = cv2.warpAffine(frame, m, (h, w))
    blur = cv2.GaussianBlur(frame, (21, 21), 0)
    hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)

    lower = [20, 30, 35]
    upper = [35, 255, 255]
    lower = np.array(lower, dtype="uint8")
    upper = np.array(upper, dtype="uint8")
    mask = cv2.inRange(hsv, lower, upper)

    output = cv2.bitwise_and(frame, hsv, mask=mask)
    #print(np.nonzero(output))
    #print("\n")
    no_red = cv2.countNonZero(mask)
    cv2.imshow("output", output)

    # get threshold image
    gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (21, 21), 0)
    ret, thresh = cv2.threshold(blur, 230, 255, cv2.THRESH_OTSU)

    # combine frame and the image difference
    img2 = cv2.addWeighted(frame1, 0.8, img1, 0.2, 0)

    # get contours and set bounding box from contours
    img3, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                                 cv2.CHAIN_APPROX_NONE)
    if len(contours) != 0:
        for c in contours:
Exemple #34
0
def calculateFrameStats(sourcePath, verbose=False, after_frame=0):
    cap = cv2.VideoCapture(sourcePath)

    data = {"frame_info": []}

    # print(cap)
    # print(sourcePath)
    # print(cap.isOpened())

    lastFrame = None
    lastFrameFound = False
    while (cap.isOpened()):

        # print(cap)
        ret, frame = cap.read()
        # (frame)
        if ret == False:
            break

        frame_number = cap.get(cv2.CAP_PROP_POS_FRAMES) - 1

        # Convert to grayscale, scale down and blur to make
        # calculate image differences more robust to noise

        try:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = scale(gray, 0.25, 0.25)
            gray = cv2.GaussianBlur(gray, (9, 9), 0.0)

        except:
            print(ret)

        if frame_number < after_frame:
            lastFrame = gray
            lastFrameFound = True
            continue

        if lastFrameFound != None:

            diff = cv2.subtract(gray, lastFrame)

            diffMag = cv2.countNonZero(diff)

            frame_info = {
                "frame_number": int(frame_number),
                "diff_count": int(diffMag)
            }
            data["frame_info"].append(frame_info)

            if verbose:
                cv2.imshow('diff', diff)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

        # Keep a ref to his frame for differencing on the next iteration
        lastFrame = gray

    cap.release()
    cv2.destroyAllWindows()

    #compute some states

    diff_counts = [fi["diff_count"] for fi in data["frame_info"]]
    # print(diff_counts)

    data["stats"] = {
        "num": len(diff_counts),
        "min": np.min(diff_counts),
        "max": np.max(diff_counts),
        "mean": np.mean(diff_counts),
        "median": np.median(diff_counts),
        "sd": np.std(diff_counts)
    }
    greater_than_mean = [
        fi for fi in data["frame_info"]
        if fi["diff_count"] > data["stats"]["mean"]
    ]
    greater_than_median = [
        fi for fi in data["frame_info"]
        if fi["diff_count"] > data["stats"]["median"]
    ]
    greater_than_one_sd = [
        fi for fi in data["frame_info"]
        if fi["diff_count"] > data["stats"]["sd"] + data["stats"]["mean"]
    ]
    greater_than_two_sd = [
        fi for fi in data["frame_info"]
        if fi["diff_count"] > (data["stats"]["sd"] * 2) + data["stats"]["mean"]
    ]
    greater_than_three_sd = [
        fi for fi in data["frame_info"]
        if fi["diff_count"] > (data["stats"]["sd"] * 3) + data["stats"]["mean"]
    ]

    data["stats"]["greater_than_mean"] = len(greater_than_mean)
    data["stats"]["greater_than_median"] = len(greater_than_median)
    data["stats"]["greater_than_one_sd"] = len(greater_than_one_sd)
    data["stats"]["greater_than_three_sd"] = len(greater_than_three_sd)
    data["stats"]["greater_than_two_sd"] = len(greater_than_two_sd)

    return data
def findRed(img):
    mask = cv2.inRange(img, lower_red, upper_red)
    count = cv2.countNonZero(mask)
    print("No of red pixels---->", count)
    return count
def findBlue(img):
    mask = cv2.inRange(img, lower_blue, upper_blue)
    count = cv2.countNonZero(mask)
    print("No of blue pixels---->", count)
    return count
Exemple #37
0
    def solve(paths):

        print("[INFO] reading paths...")
        imagePaths = sorted(list(paths.list_images(args["images"])))
        images = []

        print("[INFO] loading images...")
        for imagePath in imagePaths:
            image = cv2.imread(imagePath)
            images.append(image)

            # инициализация OpenCVшного ститчера и склейка (без обрезки)
            print("[INFO] stitching images...")
            stitcher = cv2.createStitcher() if imutils.is_cv3(
            ) else cv2.Stitcher_create()
            (status, stitched) = stitcher.stitch(images)

            # если статус равен 0, OpenCV успешно завершил работу
            if status == 0:
                #проверяем, нужно ли обрезать фотографи.
                if args["crop"] > 0:
                    # создаем 10пиксельную границу, окружающую изображение
                    print("[INFO] cropping image...")
                    stitched = cv2.copyMakeBorder(stitched, 10, 10, 10, 10,
                                                  cv2.BORDER_CONSTANT,
                                                  (0, 0, 0))

                    # переводим склееное изображение в серый цвет для
                    # трешолдинга так, что пиксели, большие чем 0 = 255
                    # все остальные остаются 0
                    gray = cv2.cvtColor(stitched, cv2.COLOR_BGR2GRAY)
                    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]
                    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                            cv2.CHAIN_APPROX_SIMPLE)
                    cnts = imutils.grab_contours(cnts)
                    c = max(cnts, key=cv2.contourArea)

                    # выделение памяти на маску, которая будет содержать в себе
                    # конечную фотографию
                    mask = np.zeros(thresh.shape, dtype="uint8")
                    (x, y, w, h) = cv2.boundingRect(c)
                    cv2.rectangle(mask, (x, y), (x + w, y + h), 255, -1)

                    # создаем две копии маски: одну для минимальной прямоуг. площади
                    # вторую для подсчета кол-ва пикселей, которые надо удалить
                    minRect = mask.copy()
                    sub = mask.copy()

                    # пока есть ненулевые пиксели, мы их убираем
                    while cv2.countNonZero(sub) > 0:
                        minRect = cv2.erode(minRect, None)
                        sub = cv2.subtract(minRect, thresh)

                    # находим контур минимального прямоуг. и выделяем из начальной фотографии его
                    cnts = cv2.findContours(minRect.copy(), cv2.RETR_EXTERNAL,
                                            cv2.CHAIN_APPROX_SIMPLE)
                    cnts = imutils.grab_contours(cnts)
                    c = max(cnts, key=cv2.contourArea)
                    (x, y, w, h) = cv2.boundingRect(c)
                    stitched = stitched[y:y + h, x:x + w]

                cv2.imwrite(args["output"], stitched)

                cv2.imshow("Stitched", stitched)
                cv2.waitKey(0)
            else:
                print("[INFO] image stitching failed! ({})".format(status))
Exemple #38
0
	# this just prints out the matrices of the video capture
	print(ret) 
	print(frame)

	# convert to grayscale	
	gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
	
	cv2.imshow('video', frame)
	
	if loop_counter == 0:
		t_minus = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
		t = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
		t_plus = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)

	# if there is no movement, the pixel count is below the threshold, then take the picture
	if cv2.countNonZero(diffImg(t_minus, t, t_plus)) <= threshold and timeCheck != datetime.now().strftime('%Ss'):
		img_name = "asl_cam{}.png".format(img_counter)
		cv2.imwrite(img_name, gray)
		print("{} written!".format(img_name))
		img_counter += 1
	
	timeCheck = datetime.now().strftime('%Ss')

	loop_counter += 1

	t_minus = t
	t = t_plus
	t_plus = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
	
	# when you press esc it exits cam
	k = cv2.waitKey(1) & 0xff
# binarize
ret2,img_bin2 = cv2.threshold(fimg,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
cv2.imwrite('binarized2.jpg',img_bin2) 

# remove noise
cont_img = 255-img_bin2
contours, hierarchy = cv2.findContours(cont_img, cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)

comp_img = img_bin2.copy()
npall = img_bin2.shape[0] * img_bin2.shape[1];
width = img_bin2.shape[1]
height = img_bin2.shape[0]
print width
print height
np = npall - cv2.countNonZero(img_bin2);
nc = len(contours)
nt = (2*(np/nc))/100
for cnt in contours:
    # reject bad components: delete those which are above mid-line and are not long enough (80% of text line??)
              
    x,y,w,h = cv2.boundingRect(cnt)
    if w>0.8*width and y>height/2:
        #for pnt in cnt:
        #    x = pnt[0][0]
        #    y = pnt[0][1]
        #    img[y,x]=(0,0,255)
        cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),1)


fname = 'D:/imgs/'+img_fn+'_ulines.jpg'
Exemple #40
0
     ret, image = videoCapture.read()
     if ret:
         # Generate work image by blurring
         workImg = cv2.blur(image, (8, 8))
         # Generate moving average image if needed
         if movingAvgImg is None:
             movingAvgImg = numpy.float32(workImg)
         # Generate moving average image
         cv2.accumulateWeighted(workImg, movingAvgImg, .03)
         diffImg = cv2.absdiff(workImg, cv2.convertScaleAbs(movingAvgImg))
         # Convert to grayscale
         grayImg = cv2.cvtColor(diffImg, cv2.COLOR_BGR2GRAY)
         # Convert to BW
         return_val, grayImg = cv2.threshold(grayImg, 25, 255, cv2.THRESH_BINARY)
         # Total number of changed motion pixels
         motionPercent = 100.0 * cv2.countNonZero(grayImg) / totalPixels
         # Detect if camera is adjusting and reset reference if more than maxChange
         if motionPercent > 25.0:
             movingAvgImg = numpy.float32(workImg)
         movementLocations = contours(grayImg)
         # Threshold trigger motion
         if motionPercent > 0.5:
             framesWithMotion += 1
             for x, y, w, h in movementLocations:
                 # Draw rectangle around fond object
                 cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
         videoWriter.write(image)
         frames += 1
     else:
         lastFrame = True
 elapsed = time.time() - start
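movementLocations above comes from a contours() helper that the excerpt doesn't include. A plausible sketch -- hypothetical, inferred only from how its return value is unpacked into (x, y, w, h) tuples:

def contours(grayImg):
    # Hypothetical helper: return the bounding rectangles of the motion
    # blobs in the thresholded difference image (OpenCV 4.x signature,
    # where findContours returns two values).
    found, _ = cv2.findContours(grayImg, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
    return [cv2.boundingRect(c) for c in found]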
Exemple #41
0
        ((0, 0), (dW, h // 2)),  # top-left
        ((w - dW, 0), (w, h // 2)),  # top-right
        ((0, (h // 2) - dHC), (w, (h // 2) + dHC)),  # center
        ((0, h // 2), (dW, h)),  # bottom-left
        ((w - dW, h // 2), (w, h)),  # bottom-right
        ((0, h - dH), (w, h))  # bottom
    ]
    on = [0] * len(segments)

    # loop over the segments
    for (i, ((xA, yA), (xB, yB))) in enumerate(segments):
        # extract the segment ROI, count the total number of
        # thresholded pixels in the segment, and then compute
        # the area of the segment
        segROI = roi[yA:yB, xA:xB]
        total = cv2.countNonZero(segROI)
        area = (xB - xA) * (yB - yA)

        # if the total number of non-zero pixels is greater than
        # 50% of the area, mark the segment as "on"
        if total / float(area) > 0.5:
            on[i] = 1

    # lookup the digit and draw it on the image
    digit = DIGITS_LOOKUP[tuple(on)]
    digits.append(digit)
    cv2.rectangle(output, (x, y), (x + w, y + h), (0, 255, 0), 1)
    cv2.putText(output, str(digit), (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
                0.65, (0, 255, 0), 2)
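DIGITS_LOOKUP, consumed above via DIGITS_LOOKUP[tuple(on)], is not shown in the excerpt. Assuming the standard seven-segment ordering (top, top-left, top-right, center, bottom-left, bottom-right, bottom -- the segments list above is cut off before the top entry), the table would be:

# Assumed lookup table (not shown in the excerpt): on/off segment states
# -> digit, with segments ordered top, top-left, top-right, center,
# bottom-left, bottom-right, bottom.
DIGITS_LOOKUP = {
    (1, 1, 1, 0, 1, 1, 1): 0,
    (0, 0, 1, 0, 0, 1, 0): 1,
    (1, 0, 1, 1, 1, 0, 1): 2,
    (1, 0, 1, 1, 0, 1, 1): 3,
    (0, 1, 1, 1, 0, 1, 0): 4,
    (1, 1, 0, 1, 0, 1, 1): 5,
    (1, 1, 0, 1, 1, 1, 1): 6,
    (1, 0, 1, 0, 0, 1, 0): 7,
    (1, 1, 1, 1, 1, 1, 1): 8,
    (1, 1, 1, 1, 0, 1, 1): 9
}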

# display the digits
plt.yticks([])

#Put back into bgr from hsv
im5 = cv2.cvtColor(im4, cv2.COLOR_HSV2BGR)

#Print image 5 (kmeans + gauss + bgr)
plt.subplot(325)
plt.title('HSV to BGR', fontsize=10)
plt.imshow(im5, 'gray')
plt.xticks([])
plt.yticks([])

#Count plant pixels - looks for colors in the dark purple region
plant_pixel = cv2.inRange(im5, np.array([40, 30, 40]), np.array([120, 90,
                                                                 120]))
plant_pixel_no = cv2.countNonZero(plant_pixel)

#Count reference pixels - looks in the orange region
ref_pixel = cv2.inRange(im5, np.array([220, 110, 30]), np.array([260, 180,
                                                                 80]))
ref_pixel_no = cv2.countNonZero(ref_pixel)

#Calculate area per pixel (the reference marker is assumed to cover 1 in^2), then find canopy area
pixel_area = 1.0 / ref_pixel_no  #in^2 per pixel
area = pixel_area * plant_pixel_no  #in^2

#Print text showing the calculated area
ax = fig.add_subplot(326)
ax.text(0.5,
        0.5,
        "The canopy area is %0.02f in^2" % area,
import cv2
import numpy as np

original = cv2.imread("TrafficPPT.png")
image_to_compare = cv2.imread("Target car.png")



# 1) Check if the 2 images are equal
if original.shape == image_to_compare.shape:
    print("The images have the same size and channels")
    difference = cv2.subtract(original, image_to_compare)
    b, g, r = cv2.split(difference)
    cv2.imshow("Difference",difference)
    if cv2.countNonZero(b) == 0 and cv2.countNonZero(g) == 0 and cv2.countNonZero(r) == 0:
        print("The images are completely Equal")
    else:
        print("The images are NOT equal")
		
# 2) Check for similarities between the 2 images

sift = cv2.xfeatures2d.SIFT_create()
kp_1, desc_1 = sift.detectAndCompute(original, None)
kp_2, desc_2 = sift.detectAndCompute(image_to_compare, None)
#print(len(kp_1))
#print(desc_1)
#cv2.imshow("Desc1",cv2.resize(desc_1,(500,500)))
#cv2.imshow("Desc2",cv2.resize(desc_2,(500,500)))
index_params = dict(algorithm=0, trees=5)
search_params = dict()
flann = cv2.FlannBasedMatcher(index_params, search_params)
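The snippet ends right after building the FLANN matcher. The usual continuation -- a sketch, not part of the original -- is a k-NN match followed by Lowe's ratio test:

# Sketch of the usual next step (not in the original snippet):
# k-NN match, then keep only the matches that pass Lowe's ratio test.
matches = flann.knnMatch(desc_1, desc_2, k=2)
good_points = [m for m, n in matches if m.distance < 0.7 * n.distance]
print("Good matches:", len(good_points))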
Exemple #44
0
    def cortar(self, baldosa):
        """Cortar la sección mínima con las letras.
        """
        # base
        gris = cv2.cvtColor(baldosa, cv2.COLOR_BGR2GRAY)
        umbral = cv2.adaptiveThreshold(gris, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                       cv2.THRESH_BINARY_INV, 19, 0)

        c = 7
        dy, dx = umbral.shape
        umbral = cv2.resize(umbral, (int(c * dx), int(c * dy)))
        baldosa = cv2.resize(baldosa, (int(c * dx), int(c * dy)))

        estructura = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
        mascara = np.zeros(umbral.shape, np.uint8)
        while True:
            erosionado = cv2.erode(umbral, estructura)
            temp = cv2.dilate(erosionado, estructura)
            temp = cv2.subtract(umbral, temp)
            mascara = cv2.bitwise_or(mascara, temp)
            umbral = erosionado.copy()
            if not cv2.countNonZero(umbral): break

        contornos = cv2.findContours(mascara, cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_NONE)[0]

        valrect = lambda r: r[2] < r[3]
        esrecth = lambda c: valrect(cv2.boundingRect(c))

        numc = np.array([len(contorno) for contorno in contornos])
        mu = numc.mean()
        sigma = numc.std()
        lon = mu + 2.32 * sigma

        contornos = [c for c in contornos if lon < len(c) and esrecth(c)]

        # if __debug__:
        #     vi.util.dibujar_rectangulos(baldosa, contornos, 2)
        #     cv2.imshow('seg_esq1', baldosa)

        if not len(contornos): return None

        # fronteras: bounding rects as (x, y, dx, dy), converted to corner coordinates below
        fronteras = np.array([cv2.boundingRect(c) for c in contornos])
        fronteras[:, 2] += fronteras[:, 0]
        fronteras[:, 3] += fronteras[:, 1]
        ex = -4
        x_min, y_min = fronteras[:, 0].min() - ex, fronteras[:, 1].min() - ex
        x_max, y_max = fronteras[:, 2].max() + ex, fronteras[:, 3].max() + ex

        gris = cv2.cvtColor(baldosa, cv2.COLOR_BGR2GRAY)
        corte = gris[y_min:y_max, x_min:x_max]
        corte = cv2.threshold(corte, 75, 255, cv2.THRESH_BINARY_INV)[1]
        ex = 12
        corte = cv2.copyMakeBorder(corte,
                                   ex,
                                   ex,
                                   ex,
                                   ex,
                                   cv2.BORDER_CONSTANT,
                                   value=(0, 0, 0))

        dy, dx = corte.shape
        corte = cv2.resize(corte, (int(0.2 * dx), int(0.2 * dy)))

        # if __debug__:
        #     cv2.imshow('seg_esq2', corte)
        #     cv2.waitKey()

        return corte
Exemple #45
0
def grading(img):

    circleRadius = 10
    MARKED_COUNT = 120

    answer = [0, 1, 2, 3, 3, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4]

    #image = cv2.imread("C:\\Users\\xSzy\\Downloads\\rsz_testimg4.jpg")
    image = cv2.imread(img)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(blurred, 75, 200)

    # find contours in the edge map, then initialize
    # the contour that corresponds to the document
    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    docCnt = None

    if len(cnts) > 0:
        # sort the contours according to their size in
        # descending order
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)

        # loop over the sorted contours
        for c in cnts:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)

            # if our approximated contour has four points,
            # then we can assume we have found the paper
            if len(approx) == 4:
                docCnt = approx
                break

    # apply a four point perspective transform to both the
    # original image and grayscale image to obtain a top-down
    # birds eye view of the paper
    paper = four_point_transform(image, docCnt.reshape(4, 2))
    warped = four_point_transform(gray, docCnt.reshape(4, 2))
    #paper = image;
    #warped = gray;

    # apply Otsu's thresholding method to binarize the warped
    # piece of paper
    thresh = cv2.threshold(warped, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]

    # find contours in the thresholded image, then initialize
    # the list of contours that correspond to questions
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    questionCnts = []

    for c in cnts:
        # compute the bounding box of the contour, then use the
        # bounding box to derive the aspect ratio
        (x, y, w, h) = cv2.boundingRect(c)
        ar = w / float(h)

        # in order to label the contour as a question, region
        # should be sufficiently wide, sufficiently tall, and
        # have an aspect ratio approximately equal to 1
        if (w >= circleRadius and h >= circleRadius and ar >= 0.8 and ar <= 1.2):
            questionCnts.append(c)

    # sort the question contours top-to-bottom, then initialize
    # the total number of correct answers
    questionCnts = contours.sort_contours(questionCnts,
                                          method="top-to-bottom")[0]

    correct = 0

    # loop for each row
    for (q, i) in enumerate(np.arange(0, len(questionCnts), 10)):
        # sort contours from left to right
        cnts = contours.sort_contours(questionCnts[i:i + 10])[0]
        # loop for 5 by once
        for col in range(0, 2):
            marked = 0
            marked_list = []
            for (j, c) in enumerate(cnts[col*5:col*5+5], col*5):
                # create a mask to check if it is marked or not
                mask = np.zeros(thresh.shape, dtype="uint8")
                cv2.drawContours(mask, [c], -1, 255, -1)
                # check
                mask = cv2.bitwise_and(thresh, thresh, mask=mask)
                total = cv2.countNonZero(mask)
                #print(q, j, total)

                # if 1 bypassed a certain value, answer is marked
                if(total > MARKED_COUNT):
                    marked += 1
                    #cv2.drawContours(paper, c, -1, (255, 0, 0), 2)
                    marked_list.append(j)
                #else:
                    #cv2.drawContours(paper, c, -1, (0, 255, 0), 2)
                cv2.imshow("img", paper)
                cv2.waitKey(50)
            print("marked = ", marked)
            # if one is marked, check if it is true or false
            if marked == 1:
                correctanswer = answer[col*10+q]
                print("Correct answer: ", answer[col*10+q])
                print("Marked: ", marked_list[0]-(5*col))
                if marked_list.count(correctanswer+5*col) > 0:
                    print("right")
                    correct += 1
                    cv2.drawContours(paper, cnts[marked_list.pop()], -1, (0, 255, 0), 2)
                else:
                    print("wrong")
                    cv2.drawContours(paper, cnts[marked_list.pop()], -1, (0, 0, 255), 2)
            # 2 or more answer, doesn't give point
            elif marked >= 2:
                for (j, c) in enumerate(cnts[col * 5:col * 5 + 5], col * 5):
                    if marked_list.count(j) > 0:
                        cv2.drawContours(paper, c, -1, (0, 0, 255), 2)
            # no answer as well
            elif marked == 0:
                for (j, c) in enumerate(cnts[col * 5:col * 5 + 5], col * 5):
                    cv2.drawContours(paper, c, -1, (0, 0, 255), 2)

    print("Correct answer: ", correct);
    #paper = cv2.resize(paper, (600, 800));
    cv2.imshow("img", paper);
    cv2.destroyAllWindows()
    cv2.waitKey(0);
    return correct;
Exemple #46
0
cap.set(cv2.CAP_PROP_FRAME_WIDTH, W)

CROP_W, CROP_H = 140,80


while(True):
    ret, frame = cap.read()
    H, W, _ = frame.shape  # frame.shape is (height, width, channels)
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    mask = cv2.inRange(frame, (0,0,0), (250,250,250))
    frame_canny = cv2.Canny(mask, threshold1=150, threshold2=300)

    # crop RoI
    roi = frame_canny[CROP_H:-CROP_H, CROP_W:-CROP_W]

    num_white = cv2.countNonZero(roi)
    print(num_white)
    is_exist = num_white > 800

    if is_exist:
        # find moments
        mu = cv2.moments(roi, False)
        x, y = int(mu["m10"]/mu["m00"])+CROP_W, int(mu["m01"]/mu["m00"])+CROP_H
        frame = cv2.circle(frame, (x, y), 20, (0, 0, 255), -1)

    output_image = np.zeros((H*2, W*2, 3))
    output_image[:H, :W] = frame / 255.0
    output_image[H:, :W] = np.stack((mask, mask, mask), axis=-1)
    output_image[:H, W:] = np.stack((frame_canny, frame_canny, frame_canny), axis=-1)
    output_image[H+CROP_H:-CROP_H, W+CROP_W:-CROP_W] = np.stack((roi, roi, roi), axis=-1)
    cv2.imshow('demo', output_image)
def findGreen(img):
    mask = cv2.inRange(img, lower_green, upper_green)
    count = cv2.countNonZero(mask)
    print("No of green pixels---->", count)
    return count
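findGreen (and findYellow in a later example) read module-level bounds that the excerpts never define. Placeholder values -- pure assumptions, and they presume img has already been converted to HSV:

import numpy as np

# Assumed globals (not shown in the excerpts); the values are placeholders
# and assume the input image is in HSV color space.
lower_green = np.array([40, 60, 60])
upper_green = np.array([80, 255, 255])
lower_yellow = np.array([20, 100, 100])
upper_yellow = np.array([35, 255, 255])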
Exemple #48
0
    def Adaptive_sliding_window(self, out_img, window, x_current, nonzeroy,
                                nonzerox):

        # Horizontal Strip
        hori_strip = self.img[self.win_y_low[window]:self.win_y_high[window],
                              0:out_img.shape[1]]

        # Create a Search Window
        k = l = 0
        percent_white_pixels_el = percent_white_pixels_er = 1.0
        self.search_complete = False

        if self.rect_sub_ROI:
            el_area = er_area = float(self.semi_major * self.semi_minor)
            col_ind = int(self.win_y_high[window] - self.win_y_low[window])
        else:
            el_area = er_area = float(math.pi * self.semi_major *
                                      self.semi_minor / 2)
            col_ind = int(
                (self.win_y_high[window] - self.win_y_low[window]) / 2)

        center_left = center_right = (x_current, col_ind)

        while (not self.search_complete) and (center_left[0] >= 0) and (
                center_right[0] <= self.img.shape[1]):

            if (percent_white_pixels_el > self.whitePixels_thers):

                # create a white filled ellipse
                mask_left = np.zeros_like(hori_strip)

                center_left = (int(x_current - self.increment * k), col_ind)

                if self.rect_sub_ROI:
                    sw_xleft_low = center_left[0] - self.semi_major
                    sw_xleft_high = center_left[0]
                    mask_left = cv2.rectangle(mask_left, (sw_xleft_low, 0),
                                              (sw_xleft_high, col_ind), 255,
                                              -1)
                else:
                    mask_left = cv2.ellipse(mask_left, center_left, self.axes,
                                            self.Leftangle,
                                            self.LeftstartAngle,
                                            self.LeftendAngle, 255, -1)

                # Bitwise AND operation to black out regions outside the mask
                self.result_left = np.bitwise_and(hori_strip, mask_left)

                percent_white_pixels_el = float(
                    cv2.countNonZero(self.result_left) / el_area)
                k += 1

            if (percent_white_pixels_er > self.whitePixels_thers):

                # create a white filled ellipse
                mask_right = np.zeros_like(hori_strip)

                center_right = (int(x_current + self.increment * l), col_ind)

                if self.rect_sub_ROI:
                    sw_xright_low = center_right[0]
                    sw_xright_high = center_right[0] + self.semi_major
                    mask_right = cv2.rectangle(mask_right, (sw_xright_low, 0),
                                               (sw_xright_high, col_ind),
                                               (255), -1)
                else:
                    mask_right = cv2.ellipse(mask_right, center_right,
                                             self.axes, self.Rightangle,
                                             self.RightstartAngle,
                                             self.RightendAngle, 255, -1)

                # Bitwise AND operation to black out regions outside the mask
                self.result_right = np.bitwise_and(hori_strip, mask_right)

                percent_white_pixels_er = float(
                    cv2.countNonZero(self.result_right) / er_area)
                l += 1

            if (percent_white_pixels_el <= self.whitePixels_thers) and (
                    percent_white_pixels_er <= self.whitePixels_thers):

                #if (k>2) and (l>2):
                self.search_complete = True
                # else:
                #      percent_white_pixels_el = 1.0
                #      percent_white_pixels_er = 1.0
            # cv2.rectangle(out_img,(sw_x_low,win_y_low),(sw_x_high,win_y_high), (0,0,255), 5)

        # print window, k, l, self.semi_major, self.semi_minor

        # self.mask_example_r = cv2.ellipse(self.mask_example_r, (center_left[0],(self.win_y_low[window]+self.win_y_high[window])/2), self.axes, self.Leftangle, self.LeftstartAngle, self.LeftendAngle, (255,255,255), -1)
        # self.mask_example_r = cv2.ellipse(self.mask_example_r, (center_right[0],(self.win_y_low[window]+self.win_y_high[window])/2), self.axes, self.Rightangle, self.RightstartAngle, self.RightendAngle, (255,255,255), -1)

        # Update the Window Size based on New Margins
        margin_ll = abs(center_left[0] - x_current)
        margin_rr = abs(center_right[0] - x_current)

        win_x_low = x_current - margin_ll
        win_x_high = x_current + margin_rr

        # Identify the nonzero pixels in x and y within the window
        good_inds1 = ((nonzeroy >= self.win_y_low[window]) &
                      (nonzeroy < self.win_y_high[window]) &
                      (nonzerox >= win_x_low) &
                      (nonzerox < win_x_high)).nonzero()[0]

        # Identify the x and y positions of all nonzero pixels in the image
        if len(self.result_left):
            nonzero1 = self.result_left.nonzero()
            nonzeroy1 = np.array(nonzero1[0])
            nonzeroy1_n = np.array(nonzero1[0] + self.win_y_low[window])
            nonzerox1 = np.array(nonzero1[1])

            if self.rect_sub_ROI:
                good_inds2_n = (((nonzeroy1 + self.win_y_low[window]) >=
                                 self.win_y_low[window]) &
                                ((nonzeroy1 + self.win_y_low[window]) <
                                 self.win_y_high[window])
                                & (nonzerox1 >= win_x_low - (self.semi_major))
                                & (nonzerox1 < win_x_low)).nonzero()[0]
                good_inds2 = ((nonzeroy1 >= self.win_y_low[window]) &
                              (nonzeroy1 < (self.win_y_high[window]))
                              & (nonzerox1 >= sw_xleft_low) &
                              (nonzerox1 < sw_xleft_high)).nonzero()[0]
            else:
                good_inds2_n = (((nonzeroy1 + self.win_y_low[window]) >=
                                 self.win_y_low[window]) &
                                ((nonzeroy1 + self.win_y_low[window]) <
                                 self.win_y_high[window])
                                & (nonzerox1 >= win_x_low - (self.semi_major))
                                & (nonzerox1 < win_x_low)).nonzero()[0]
                good_inds2 = ((nonzeroy1 >= 0) & (nonzeroy1 < col_ind)
                              & (nonzerox1 >= win_x_low - (self.semi_major)) &
                              (nonzerox1 < win_x_low)).nonzero()[0]

        if len(self.result_right):

            nonzero2 = self.result_right.nonzero()
            nonzeroy2 = np.array(nonzero2[0])
            nonzeroy2_n = np.array(nonzero2[0] + self.win_y_low[window])
            nonzerox2 = np.array(nonzero2[1])
            if self.rect_sub_ROI:
                good_inds3_n = (((nonzeroy2 + self.win_y_low[window]) >=
                                 self.win_y_low[window]) &
                                ((nonzeroy2 + self.win_y_low[window]) <
                                 self.win_y_high[window])
                                & (nonzerox2 >= win_x_high) &
                                (nonzerox2 < win_x_high +
                                 (self.semi_major))).nonzero()[0]
                good_inds3 = ((nonzeroy2 >= self.win_y_low[window]) &
                              (nonzeroy2 < self.win_y_high[window])
                              & (nonzerox2 >= sw_xright_low) &
                              (nonzerox2 < sw_xright_high)).nonzero()[0]
            else:
                good_inds3_n = (((nonzeroy2 + self.win_y_low[window]) >=
                                 self.win_y_low[window]) &
                                ((nonzeroy2 + self.win_y_low[window]) <
                                 self.win_y_high[window])
                                & (nonzerox2 >= win_x_high) &
                                (nonzerox2 < win_x_high +
                                 (self.semi_major))).nonzero()[0]
                good_inds3 = ((nonzeroy2 >= 0) & (nonzeroy2 < col_ind)
                              & (nonzerox2 >= win_x_high) &
                              (nonzerox2 < win_x_high +
                               (self.semi_major))).nonzero()[0]

        # good_inds = [good_inds1, good_inds2, good_inds3]
        # good_inds = np.concatenate(good_inds)
        #print good_inds
        total_ypoints = []
        total_xpoints = []
        if len(self.result_left) and len(self.result_right):

            total_ypoints = [
                nonzeroy1_n[good_inds2_n], nonzeroy[good_inds1],
                nonzeroy2_n[good_inds3_n]
            ]
            total_xpoints = [
                nonzerox1[good_inds2_n], nonzerox[good_inds1],
                nonzerox2[good_inds3_n]
            ]

            total_xpoints = np.concatenate(total_xpoints)
            total_ypoints = np.concatenate(total_ypoints)

            out_img[nonzeroy[good_inds1],
                    nonzerox[good_inds1]] = [255, 0, 0]  #[255, 0, 100]
            # # print nonzeroy2+self.win_y_high[window]
            # # out_img[nonzeroy1[good_inds2]+self.win_y_low[window], nonzerox1[good_inds2]] = [255, 0, 0] #[255, 0, 100]
            # # out_img[nonzeroy1[good_inds3]+self.win_y_low[window], nonzerox1[good_inds3]] = [255, 0, 0] #[255, 0, 100]
            out_img[nonzeroy1_n[good_inds2_n],
                    nonzerox1[good_inds2_n]] = [255, 0, 0]  #[255, 0, 100]
            out_img[nonzeroy2_n[good_inds3_n],
                    nonzerox2[good_inds3_n]] = [255, 0, 0]  #[255, 0, 100]

            # print nonzeroy1[good_inds2], self.win_y_high[window], (self.win_y_high[window]-1)-nonzeroy1[good_inds2]

            # If you found > minpix pixels, recenter next window on their mean position
            if len(total_xpoints) > self.minpix:
                x_current = int(np.mean(total_xpoints))

            if self.rect_sub_ROI:
                # cv2.rectangle(out_img, (sw_xleft_low,self.win_y_low[window]),(sw_xleft_high,self.win_y_high[window]), (0,255,0), 5)
                # cv2.rectangle(out_img, (sw_xright_low,self.win_y_low[window]),(sw_xright_high,self.win_y_high[window]), (0,255,0), 5)
                cv2.line(out_img, (sw_xleft_low, self.win_y_low[window]),
                         (sw_xleft_high, self.win_y_low[window]), (0, 255, 0),
                         3)
                cv2.line(out_img, (sw_xleft_low, self.win_y_high[window]),
                         (sw_xleft_high, self.win_y_high[window]), (0, 255, 0),
                         3)
                cv2.line(out_img, (sw_xleft_low, self.win_y_low[window]),
                         (sw_xleft_low, self.win_y_high[window]), (0, 255, 0),
                         3)

                cv2.line(out_img, (sw_xright_low, self.win_y_low[window]),
                         (sw_xright_high, self.win_y_low[window]), (0, 255, 0),
                         3)
                cv2.line(out_img, (sw_xright_low, self.win_y_high[window]),
                         (sw_xright_high, self.win_y_high[window]),
                         (0, 255, 0), 3)
                cv2.line(out_img, (sw_xright_high, self.win_y_low[window]),
                         (sw_xright_high, self.win_y_high[window]),
                         (0, 255, 0), 3)
            else:
                y_center = (self.win_y_low[window] +
                            self.win_y_high[window]) // 2
                #print center_left[0],center_right[0], y_center
                cv2.ellipse(out_img, (int(center_left[0]), y_center),
                            self.axes, self.Leftangle, self.LeftstartAngle,
                            self.LeftendAngle, self.color_ellipse,
                            self.thickness_ellipse)
                cv2.ellipse(out_img, (int(center_right[0]), y_center),
                            self.axes, self.Rightangle, self.RightstartAngle,
                            self.RightendAngle, self.color_ellipse,
                            self.thickness_ellipse)

                # cv2.rectangle(out_img,(win_x_low, self.win_y_low[window]),(win_x_high, self.win_y_high[window]), (0,255,0), 5)
                # cv2.line(out_img, (center_left[0], self.win_y_low[window]), (center_left[0], self.win_y_high[window]), (0,255,0), 5)
                # cv2.line(out_img, (center_right[0], self.win_y_low[window]), (center_right[0], self.win_y_high[window]), (0,255,0), 5)
                # cv2.arrowedLine(out_img, (x_current, y_center), (center_left[0], y_center), (255, 0, 0), 5, 8, 0, 0.3)
                # cv2.arrowedLine(out_img, (x_current, y_center), (center_right[0], y_center), (255, 0, 0), 5, 8, 0, 0.3)

        return out_img, x_current, (
            win_x_low, win_x_high
        ), total_ypoints, total_xpoints  #out_img, good_inds, total_ypoints, total_xpoints
def findYellow(img):
    mask = cv2.inRange(img, lower_yellow, upper_yellow)
    count = cv2.countNonZero(mask)
    print("No of yellow pixels---->", count)
    return count
Exemple #50
0
def readfile(path):
    #===============================
    #path = "scantron-100.jpg"
    widthImg = 1245
    heightImg=3000
    question =50
    choices = 5
    #===============================
    img = cv2.imread(path)

    #PREPROCESSING
    img = cv2.resize(img,(widthImg,heightImg))
    imgContours = img.copy()
    imgBiggestContours = img.copy()
    imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    imgBlur = cv2.GaussianBlur(imgGray,(5,5),1)
    imgCanny = cv2.Canny(imgBlur,10,50)

    # FINDING ALL CONTOURS
    contours, hierarchy = cv2.findContours(imgCanny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(imgContours, contours, -1, (0, 255, 0), 10)

    #FIND RECTANGLES
    rectCon = utlis.rectContour(contours)
    biggestContour = utlis.getCornerPoints(rectCon[0])
    gradePoints = utlis.getCornerPoints(rectCon[1])
    test = biggestContour.copy()
    test[0][0]=[333,2617]
    test[1][0]=[331,437]
    test[2][0]=[775,437]
    test[3][0]=[778,2617]
    #print("ttt:",test)
    #print("\n for contour\n",biggestContour )
    #print("\n for grade\n",gradePoints)
    biggestContour=test

    if biggestContour.size != 0 and gradePoints.size != 0:
        cv2.drawContours(imgBiggestContours,biggestContour,-1,(0,255,0),20)
        cv2.drawContours(imgBiggestContours,gradePoints,-1,(255,0,0),20)

        biggestContour= utlis.reorder(biggestContour)
        gradePoints = utlis.reorder(gradePoints)

        pt1 = np.float32(biggestContour)
        pt2= np.float32([[0,0],[widthImg,0],[0,heightImg],[widthImg,heightImg]])
        matrix = cv2.getPerspectiveTransform(pt1,pt2)
        imgWarpColored = cv2.warpPerspective(img,matrix,(widthImg,heightImg))

        ptG1 = np.float32(gradePoints)
        ptG2 = np.float32([[0,0],[325,0],[0,150],[325,150]])
        matrixG = cv2.getPerspectiveTransform(ptG1, ptG2)
        imgGradeDisplay = cv2.warpPerspective(img, matrixG,(325, 150))
        #cv2.imshow("Grade", imgGradeDisplay)

        imgWarpGray = cv2.cvtColor(imgWarpColored,cv2.COLOR_BGR2GRAY)
        imgThresh = cv2.threshold(imgWarpGray,150,255,cv2.THRESH_BINARY_INV)[1]
        #cv2.imshow("Grade", imgThresh)
        
        boxes = utlis.splitBoxes(imgThresh)
        #cv2.imshow("test", boxes[4])
        #print(cv2.countNonZero(boxes[2]),cv2.countNonZero(boxes[0]))


        #GETTING NO ZERO PIXEL VALUES OF EACH BOX
        myPixelVal = np.zeros((question,choices))
        countC = 0
        countR = 0

        for image in boxes:
            totalPixels = cv2.countNonZero(image)
            myPixelVal[countR][countC] = totalPixels
            countC += 1
            if countC == choices:
                countR += 1
                countC = 0
        #print(myPixelVal)

        global myIndex
        localmyIndex = []
        for x in range(0, question):
            arrline = myPixelVal[x]
            arrmed = np.median(arrline)
            localmyIndex.append(-1)
            for y in range(0, choices):
                if myPixelVal[x][y] / arrmed > 2:
                    localmyIndex[x] = y
        myIndex = localmyIndex




    imgBlank = np.zeros_like(img)
    imageArray = ([img, imgGray, imgBlur, imgCanny],
                  [imgContours, imgBiggestContours, imgWarpColored, imgThresh])
    imgStacked = utlis.stackImages(imageArray,0.5)


    #cv2.imshow("stacked images",imgStacked)
    cv2.waitKey(0)
Exemple #51
0
 def cal_hamming_distance(self, model_hash_code, search_hash_code):
     # return the number of positions where the two hash codes differ
     diff = np.uint8(np.bitwise_xor(model_hash_code, search_hash_code))
     return cv2.countNonZero(diff)
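A usage sketch for cal_hamming_distance. The hash codes here come from a hypothetical average-hash helper, since the excerpt doesn't show how they are produced:

import cv2
import numpy as np

def average_hash(img, size=8):
    # Hypothetical companion (not in the original class): classic aHash --
    # downscale, grayscale, then threshold each pixel against the mean.
    gray = cv2.cvtColor(cv2.resize(img, (size, size)), cv2.COLOR_BGR2GRAY)
    return (gray > gray.mean()).astype(np.uint8)

# A smaller distance means more similar images:
# distance = matcher.cal_hamming_distance(average_hash(img_a), average_hash(img_b))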
Exemple #52
0
while cap.isOpened():
    # Aquire image
    ret, frame = cap.read()

    # Convert to hsv colorspace
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Create mask
    upper_lim = np.array([upper_value, 255, 255])
    lower_lim = np.array([lower_value, 125, 125])
    mask = cv2.inRange(hsv, lower_lim, upper_lim)

    # Overlay mask with image (bitwise AND)
    res = cv2.bitwise_and(frame, frame, mask=mask)

    if cv2.countNonZero(mask) > 50:
        #print("Green Light!")
        GPIO.output(4, GPIO.LOW)
    else:
        GPIO.output(4, GPIO.HIGH)
#    cv2.imshow(WIN1,frame)
#    cv2.imshow(WIN2,mask)
#    cv2.imshow(WIN3,res)
#    cv2.resizeWindow(WIN1,WIDTH,HEIGHT)
#    cv2.resizeWindow(WIN2,WIDTH,HEIGHT)
#    cv2.resizeWindow(WIN3,WIDTH,HEIGHT)
    key = cv2.waitKey(30)
    if key <= -1:
        key = 0
    if chr(key) == 'q':
        print("Upper Limit:", upper_value)
Exemple #53
0
                        # Color range for red       
                        ([17, 15, 100], [50, 56, 200])
                ]
                
                # Loop over the boundaries
                for (lower, upper) in boundaries:
                        # Create NumPy arrays from the boundaries
                        lower = np.array(lower, dtype = "uint8")
                        upper = np.array(upper, dtype = "uint8")
                 
                        # Find the colors within the specified boundaries and apply
                        # the mask
                        mask = cv2.inRange(image, lower, upper)                        
                       
                        # Merge the mask into the accumulated masks
                        accumMask = cv2.bitwise_or(accumMask, mask)
               

                # Show the images
                # cv2.imshow("images", np.hstack([accumMask]))               
                unmasked = cv2.countNonZero(accumMask)

                if unmasked:
                    print "has red"
                    ser.write('z') # Kosara's code
                else:
                    print "none"
                    ser.write('n') # Kosara's code
               
                cv2.destroyAllWindows()
Exemple #54
0
        while (current_frame < _debug_MissFrames):
            ret, frame = cap.read()
            current_frame += 1
        '''

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        if current_frame > 0:

            current_mouse_position = CurrentMouseLocation(gray, current_frame)

            #colored_pixels = countNonBackGroundPixels(gray, BlackThreshold, LastValueFrame);

            ret, binaryImage = cv2.threshold(gray, 50, 255, cv2.THRESH_BINARY)

            colored_pixels = cv2.countNonZero(binaryImage)

            #print(colored_pixels)

            if (colored_pixels > MouseColorValue
                    and colored_pixels - LastValueFrame > ThresholdDrawing):
                current_connected_frame = 0

            else:
                current_connected_frame += 1

            if (colored_pixels > MouseColorValue
                    and colored_pixels - LastValueFrame > ThresholdDrawing
                ) or (current_connected_frame < MaxConnectFrames):
                #print("Drawing")
questions_contour = contours.sort_contours(questions_contour,
                                           method='top-to-bottom')[0]
correct = 0

# each q has 5 possible answers, looping over qs in batches of 5
for (q, i) in enumerate(np.arange(0, len(questions_contour), 5)):
    # sorting contours for current q from L->R
    cnts = contours.sort_contours(questions_contour[i:i + 5])[0]
    bubbled = None
    # to determine which bubble is filled, use threshold - T image and count the number of non-zero pixels in each bubble area
    for (j, c) in enumerate(cnts):
        # construct mask that only reveals current bubble
        mask = np.zeros(T.shape, dtype="uint8")
        cv2.drawContours(mask, [c], -1, 255, -1)
        #apply mask to T image + count no of non zero pixels in the bubble area
        mask = cv2.bitwise_and(T, T, mask=mask)
        total = cv2.countNonZero(mask)

        if bubbled is None or total > bubbled[0]:
            bubbled = (total, j)

    # initialize the contour color and the index of the correct answer;
    # this runs once per question, after all of its bubbles are inspected
    color = (0, 0, 255)
    k = ANSWER_KEY[q]

    if k == bubbled[1]:
        color = (0, 255, 0)
        correct += 1

    #draw the outline of the correct answer
    cv2.drawContours(paper, [cnts[k]], -1, color, 3)
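ANSWER_KEY is used above but never defined in the excerpt; it is just a map from question index to the index of the correct bubble. Illustrative values only:

# Illustrative only -- the real key depends on the exam:
# question 0 -> bubble B, question 1 -> bubble E, and so on.
ANSWER_KEY = {0: 1, 1: 4, 2: 0, 3: 3, 4: 1}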
 def count_pixels(self, type, mask):
     data = self.calibrations[type]
     dst = cv2.inRange(
         mask, (data['hue1_low'], data['sat_low'], data['val_low']),
         (data['hue1_high'], data['sat_high'], data['val_high']))
     return cv2.countNonZero(dst)
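count_pixels expects a calibrations dict keyed by object type. A hypothetical entry -- the field names are taken from the lookups in the method, the values are placeholders:

# Hypothetical calibration entry for count_pixels:
calibrations = {
    "ball": {
        "hue1_low": 5, "hue1_high": 15,
        "sat_low": 100, "sat_high": 255,
        "val_low": 100, "val_high": 255,
    }
}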
Exemple #57
0
        img = imutils.resize(imgRaw, width=XFWIDE)
        h, w = img.shape[:2]  # height and width of the resized frame
        xcent = w // 2
        ycent = h // 2

        frameCount += 1
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        #        flow = cv.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        flow = cv.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3, 15, 3,
                                           7, 1.5, 0)
        prevgray = gray
        vt = calc_v(flow)  # returns threshold map (mask) but uint8
        #print (vt.dtype)
        #print (vt.shape)
        vt0 = vt.copy()
        mCount = cv.countNonZero(vt)
        fx = flow[:, :, 0]
        deltaFC = frameCount - lastFC  # 1 or 2 during an event
        if (mCount > vThreshold):  # significant motion detected this frame
            motionNow = True
            im2, contours, hierarchy = cv.findContours(vt0, cv.RETR_EXTERNAL,
                                                       cv.CHAIN_APPROX_SIMPLE)
            #im2, contours, hierarchy = cv.findContours(vt, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
            contours = sorted(contours, key=cv.contourArea, reverse=True)[:2]
            # cv.drawContours(img, contours, -1, (0,255,0), 2)  # draw contours on image
            cnt = contours[0]  # select the largest contour
            M = cv.moments(cnt)
            Area = M['m00']  # area of contour
            cx = int(M['m10'] / Area)
            cy = int(M['m01'] / Area)
            dcx = (cx - xcent)
Exemple #58
0
# perform a connected component analysis on the thresholded image,
# then initialize a mask to store only the "large" components
labels = measure.label(thresh, connectivity=2, background=0)
mask = np.zeros(thresh.shape, dtype="uint8")

# loop over the unique components
for label in np.unique(labels):
    # if this is the background label, ignore it
    if label == 0:
        continue

    # otherwise, construct the label mask and count the
    # number of pixels
    labelMask = np.zeros(thresh.shape, dtype="uint8")
    labelMask[labels == label] = 255
    numPixels = cv2.countNonZero(labelMask)

    # if the number of pixels in the component is sufficiently
    # large, then add it to our mask of "large blobs"
    if numPixels > 300:
        mask = cv2.add(mask, labelMask)

# find the contours in the mask, then sort them from left to
# right
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
cnts = contours.sort_contours(cnts)[0]

# loop over the contours
for (i, c) in enumerate(cnts):
def main():

    pan_angle = 90  # initial angle for pan
    tilt_angle = 90  # initial angle for tilt
    fw_angle = 90
    scan_count = 0
    print "Begin!"

    # loop over the frames from the video stream
    while True:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 400 pixels
        frame = vs.read()
        frame = imutils.resize(frame, width=400)

        # grab the frame dimensions and convert it to a blob
        (h, w) = frame.shape[:2]
        # blob = cv2.dnn.blobFromImage(cv2.resize(frame, (16, 16))
        blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 0.007843,
                                     (300, 300), 127.5)
        newframe = resizeimage.resize_cover(frame, [16, 16])
        # pass the blob through the network and obtain the detections and
        # predictions
        net.setInput(blob)
        detections = net.forward()

        # loop over the detections
        for i in np.arange(0, detections.shape[2]):

            #####################################################
            # SEND newframe to regional_cnn class here.
            #####################################################
            # Create new r_cnn object. Initialize with image data
            r_cnn = regional_cnn.RegionalCNN(train_data=newframe,
                                             filters=32,
                                             step=32)
            pedestrian = r_cnn.train()
            print pedestrian
            if pedestrian[0][0] == 1:

                ##############################################
                # Motion detection
                ##############################################
                ret, frame = cam.read()  # read from camera
                totalDiff = cv2.countNonZero(diffImg(
                    t_minus, t, t_plus))  # this is total difference number
                text = "threshold: " + str(
                    totalDiff)  # make a text showing total diff.
                cv2.putText(frame, text, (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1,
                            (0, 0, 0), 2)  # display it on screen
                movement = 0
                if totalDiff > threshold and timeCheck != datetime.now(
                ).strftime('%Ss'):
                    dimg = cam.read()[1]
                    # cv2.imwrite(datetime.now().strftime('%Y%m%d_%Hh%Mm%Ss%f') + '.jpg', dimg)
                    movement = 1
                timeCheck = datetime.now().strftime('%Ss')
                # Read next image
                t_minus = t
                t = t_plus
                t_plus = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)
                cv2.imshow(winName, frame)
                key = cv2.waitKey(10)
                # if key == 27:	 # comment this 'if' to hide window
                # cv2.destroyWindow(winName)
                # break
                ########################################
                #CAR MOVEMENT
                ########################################
                # Turn right motion detected
                if movement == 1:
                    bw.speed = SPEED
                    fw.turn_right()
                    bw.forward()
                    time.sleep(1)
                    bw.stop()
                    time.sleep(1)
                    bw.backward()
                    time.sleep(1)
                    bw.stop()
                    fw.turn_left()
                else:
                    bw.speed = SPEED
                    fw.turn_left()
                    bw.forward()
                    time.sleep(1)
                    bw.stop()
                    time.sleep(1)
                    bw.backward()
                    time.sleep(1)
                    bw.stop()
                    fw.turn_right()
            # The Prediction
            # extract the confidence (i.e., probability) associated with
            # confidence = detections[0, 0, i, 2]
            # filter out weak detections by ensuring the `confidence` is
            # greater than the minimum confidence
            # if confidence > args["confidence"]:
            # # extract the index of the class label from the
            # # `detections`, then compute the (x, y)-coordinates of
            # # the bounding box for the object
            # idx = int(detections[0, 0, i, 1])
            # box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            # (startX, startY, endX, endY) = box.astype("int")

            # draw the prediction on the frame
            # label = "{}: {:.2f}%".format(CLASSES[idx],
            # confidence * 100)
            # cv2.rectangle(frame, (startX, startY), (endX, endY),
            # COLORS[idx], 2)
            # y = startY - 15 if startY - 15 > 15 else startY + 15
            # cv2.putText(frame, label, (startX, y),
            # cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)

        # show the output frame
        # cv2.imshow("Frame", frame)
        # key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        # update the FPS counter
        fps.update()
    #rescaling the image
    W = 300.
    height, width, depth = imageA.shape
    imgScale = W / width
    newX, newY = imageA.shape[1] * imgScale, imageA.shape[0] * imgScale
    imageA = cv.resize(imageA, (int(newX), int(newY)))
    #convert image to gray and hsv color spaces
    grayA = cv.cvtColor(imageA, cv.COLOR_BGR2GRAY)
    hsv = cv.cvtColor(imageA, cv.COLOR_BGR2HSV)

    ##yellow detection
    lower_yellow = np.array([15, 100, 100])
    upper_yellow = np.array([35, 255, 255])
    mask = cv.inRange(hsv, lower_yellow, upper_yellow)
    px = mask.shape[0] * mask.shape[1]
    whitey = cv.countNonZero(mask)

    ##green detection
    lower_green = np.array([30, 100, 50])
    upper_green = np.array([90, 255, 255])
    mask2 = cv.inRange(hsv, lower_green, upper_green)
    px2 = mask2.shape[0] * mask2.shape[1]
    whiteg = cv.countNonZero(mask2)

    ##brown detection
    lower_brown = np.array([10, 100, 20])
    upper_brown = np.array([20, 255, 200])
    mask3 = cv.inRange(hsv, lower_brown, upper_brown)
    px3 = mask3.shape[0] * mask3.shape[1]
    whiteb = cv.countNonZero(mask3)
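The excerpt stops after counting the masked pixels; a natural follow-up (a sketch, not in the original) is to express each count as a fraction of the frame:

    # Sketch (not in the original): per-color coverage as a percentage
    # of the frame area.
    yellow_pct = 100.0 * whitey / px
    green_pct = 100.0 * whiteg / px2
    brown_pct = 100.0 * whiteb / px3
    print("yellow %.1f%%, green %.1f%%, brown %.1f%%" %
          (yellow_pct, green_pct, brown_pct))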