Example #1
def get_motions(f, fMask, thickness=1, color=(170, 170, 170)):
    '''
    Iterates over the contours in a mask and draws a bounding box
    around the ones that encompass an area greater than a threshold.
    This returns an image of just the drawn boxes (black background),
    and also an array of the box points.
    '''
    rects_mot = []
    f_rects = np.zeros(f.shape, np.uint8)
    # get contours
    if imutils.is_cv3():
        _, cnts, hierarchy = cv2.findContours(
            fMask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    elif imutils.is_cv2():
        cnts, hierarchy = cv2.findContours(
            fMask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # loop over the contours
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < contourThresh:
            continue

        if imutils.is_cv3():
            box = cv2.boxPoints(cv2.minAreaRect(c))
        elif imutils.is_cv2():
            box = cv2.cv.BoxPoints(cv2.minAreaRect(c))

        box = np.int0(box)
        cv2.drawContours(f_rects, [box], 0, color, thickness)
        rects_mot.append(cv2.boundingRect(c))
    return f_rects, rects_mot
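A minimal usage sketch (the file name, threshold value, and subtractor settings are illustrative, not from the original project); get_motions reads contourThresh from module scope, so it must exist before the call:

import cv2
import imutils
import numpy as np

contourThresh = 500  # assumed module-level area threshold used by get_motions

fgbg = (cv2.createBackgroundSubtractorMOG2() if imutils.is_cv3()
        else cv2.BackgroundSubtractorMOG2())
cap = cv2.VideoCapture("example.mp4")
while True:
    grabbed, f = cap.read()
    if not grabbed:
        break
    fMask = fgbg.apply(f)                       # foreground mask for this frame
    f_rects, rects_mot = get_motions(f, fMask)  # boxes image + box points
    cv2.imshow("motion", cv2.add(f, f_rects))   # overlay the drawn boxes
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()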
Example #2
def contours():
	# load the image, convert it to grayscale, and blur it slightly
	image = cv2.imread("e.jpg")
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	gray = cv2.GaussianBlur(gray, (5, 5), 0)
	 
	# threshold the image, then perform a series of erosions +
	# dilations to remove any small regions of noise
	thresh = cv2.threshold(gray, 45, 255, cv2.THRESH_BINARY)[1]
	thresh = cv2.erode(thresh, None, iterations=2)
	thresh = cv2.dilate(thresh, None, iterations=2)

	# find contours in thresholded image, then grab the largest
	# one
	cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
		cv2.CHAIN_APPROX_SIMPLE)
	cnts = cnts[0] if imutils.is_cv2() else cnts[1]
	c = max(cnts, key=cv2.contourArea)
	extLeft = tuple(c[c[:, :, 0].argmin()][0])
	extRight = tuple(c[c[:, :, 0].argmax()][0])
	extTop = tuple(c[c[:, :, 1].argmin()][0])
	extBot = tuple(c[c[:, :, 1].argmax()][0])
	 
	cv2.drawContours(image, [c], -1, (0, 255, 255), 2)
	cv2.circle(image, extLeft, 8, (0, 0, 255), -1)
	cv2.circle(image, extRight, 8, (0, 255, 0), -1)
	cv2.circle(image, extTop, 8, (255, 0, 0), -1)
	cv2.circle(image, extBot, 8, (255, 255, 0), -1)
	 
	# save the output image
	cv2.imwrite("/home/anushka/Documents/f.jpg", image)
Example #3
    def extractFeatures(self, image):
        # convert the image to grayscale
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # detect and extract features from the image
        # --> need SIFT because the mosaic can be made from camera that takes images with different Scale and Rotation

        # if we are using opencv 2.4.9
        if imutils.is_cv2():
            # the default parameters for cv2.SIFT()
            nfeatures = 0
            nOctaveLayers = 3
            contrastThreshold = 0.04
            edgeThreshold = 10
            sigma = 1.6
            sift = cv2.SIFT(nfeatures, nOctaveLayers, contrastThreshold, edgeThreshold, sigma)
            (kps, features) = sift.detectAndCompute(gray, None)   # (image, None)
        # check to see if we are using OpenCV 3
        elif imutils.is_cv3():
            descriptor = cv2.xfeatures2d.SIFT_create()
            (kps, features) = descriptor.detectAndCompute(gray, None)

        self.logger.info("Found {} keypoints in frame".format(len(kps)))

        # convert the keypoints from KeyPoint objects to NumPy arrays
        kps = np.float32([kp.pt for kp in kps])
        # return a tuple of keypoints and features
        return (kps, features)
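The (kps, features) pair returned above would typically be matched against another frame when building the mosaic. A minimal sketch of that next step, assuming featuresA and featuresB come from two calls to extractFeatures (the 0.75 ratio is a common default, not taken from this project):

matcher = cv2.BFMatcher()
rawMatches = matcher.knnMatch(featuresA, featuresB, k=2)
matches = []
for m in rawMatches:
    # Lowe's ratio test: keep a match only when the best candidate is
    # clearly closer than the second-best one
    if len(m) == 2 and m[0].distance < m[1].distance * 0.75:
        matches.append((m[0].trainIdx, m[0].queryIdx))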
Example #4
def recognize(images_path, training_path, pass_info=False):
    if "camera_1" in training_path:
        threshold = 75
        area = 110
    elif "camera_2" in training_path:
        threshold = 151
        area = 70
    elif "camera_3" in training_path:
        threshold = 151
        area = 70
    else:
        raise ValueError("unknown camera in training path")
    #######   training part    ###############
    samples = np.loadtxt(training_path+'generalsamples.data', np.float32)
    responses = np.loadtxt(training_path+'generalresponses.data', np.float32)
    responses = responses.reshape((responses.size, 1))

    model = cv2.ml.KNearest_create()
    model.train(samples, cv2.ml.ROW_SAMPLE, responses)

    ############################# testing part  #########################

    image = cv2.imread(images_path)
    out = np.zeros(image.shape, np.uint8)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    thresh = cv2.threshold(blurred, threshold, 255, cv2.THRESH_BINARY)[1]  # don't change 151

    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    # new = cv2.drawContours(image, cnts, -1, (0, 255, 0), 3)
    # cv2.imshow('im', new)
    # cv2.imshow('out', out)
    # cv2.waitKey(0)
    content = []
    for c in cnts:
        # compute the center of the contour
        M = cv2.moments(c)
        if M["m00"] == 0:
            continue
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])

        # draw the contour and center of the shape on the image
        cv2.circle(image, (cX, cY), 7, (255, 255, 255), -1)
        [x, y, w, h] = cv2.boundingRect(c)
        if cv2.contourArea(c)>area: #the higher the threshold, the smaller the area
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
            roi = thresh[y:y + h, x:x + w]
            roismall = cv2.resize(roi, (10, 10))
            roismall = roismall.reshape((1, 100))
            roismall = np.float32(roismall)
            retval, results, neigh_resp, dists = model.findNearest(roismall, k=1)
            string = str(int((results[0][0])))
            if pass_info: #returns temp between C and F symbols (camera 1)
                content.append((string, [cX,cY]))
            else:
                if string != str(42):
                    cv2.putText(out, string, (x, y + h), 0, 1, (0, 255, 0))
                    content.append(string)
    content.reverse()
    return content
Example #5
    def vidFeed(self):


        self.takeFrame()

        # draw reference points and lines
        cv2.line(self.image, (self.widthHalf, 0), (self.widthHalf, self.height), (0, 255, 0), 2)  # center line
        cv2.line(self.image, (0, self.heightHalf), (self.width, self.heightHalf), (0, 255, 0), 2)  # center line
        cv2.circle(self.image, (self.widthHalf, self.heightHalf), 6, (200, 255, 0), -1)  # CenterPoint
        self.roiMask()

        # Process the image
        gray = cv2.cvtColor(self.masked_image, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (5, 5), 0)
        th = cv2.threshold(gray, 127,255, cv2.THRESH_BINARY )[1]
        th = cv2.erode(th, None, iterations=2)
        th = cv2.dilate(th, None, iterations=2)

        # find the contours in the thresholded image
        cnts = cv2.findContours(th.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

        self.thresh = th
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        if len(cnts) > 0:
            c = max(cnts, key=cv2.contourArea)
            self.rohitC = c
            #find the moments of image
            M = cv2.moments(c)
            self.cX = int((M["m10"] / M["m00"]) )
            self.cY = int((M["m01"] / M["m00"]) )

            #bounding Rectangle
            self.boux, self.bouy, self.bouw, self.bouh = cv2.boundingRect(c)
            cv2.rectangle(self.image, (self.boux, self.bouy), (self.boux + self.bouw, self.bouy + self.bouh), (100, 100, 0), 2)


            cv2.putText(self.image, str(self.boux) + str(" ") + str(self.bouy)  + str(" ")+ str(self.boux+self.bouw)  + str(" ")+ str(self.bouy+self.bouh), (self.width - 400, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (20, 255, 255), 2)

            #drawpoints of countours

            cv2.circle(self.image, (self.cX, self.cY), 6, (100, 200, 255), -1)  # centroid
            extLeft = tuple(c[c[:, :, 0].argmin()][0])
            extRight = tuple(c[c[:, :, 0].argmax()][0])
            extTop = tuple(c[c[:, :, 1].argmin()][0])
            extBot = tuple(c[c[:, :, 1].argmax()][0])

            centerContoursX = self.cX
            error = self.centerFrameX - centerContoursX
            print(error)
            self.control(error)
            # draw the outline of the object, then draw each of the
            # extreme points, where the left-most is red, right-most
            # is green, top-most is blue, and bottom-most is teal

            cv2.drawContours(self.image, [c], -1, (0, 255, 255), 2)
            cv2.circle(self.image, extLeft, 6, (0, 0, 255), -1)  # red
            cv2.circle(self.image, extRight, 6, (0, 255, 0), -1) # green
            cv2.circle(self.image, extTop, 6, (255, 0, 0), -1) # blue
            cv2.circle(self.image, extBot, 6, (255, 255, 0), -1) # teal
Example #6
def colorShapeFromImg(request):
	if request.method == 'POST':
		# load the image and resize it to a smaller factor so that
		# the shapes can be approximated better
		image = cv2.imdecode(np.frombuffer(request.FILES['file'].read(), np.uint8), cv2.IMREAD_UNCHANGED)

		resized = imutils.resize(image, width=300)
		ratio = image.shape[0] / float(resized.shape[0])

		# blur the resized image slightly, then convert it to both
		# grayscale and the L*a*b* color spaces
		blurred = cv2.GaussianBlur(resized, (5, 5), 0)
		gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
		lab = cv2.cvtColor(blurred, cv2.COLOR_BGR2LAB)
		thresh = cv2.threshold(gray, 60, 255, cv2.THRESH_BINARY)[1]

		# find contours in the thresholded image
		cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
			cv2.CHAIN_APPROX_SIMPLE)
		cnts = cnts[0] if imutils.is_cv2() else cnts[1]

		# initialize the shape detector and color labeler
		sd = ShapeDetector()
		cl = ColorLabeler()

		response = {}

		# loop over the contours
		for c in cnts:
			# compute the center of the contour
			M = cv2.moments(c)
			cX = int((M["m10"] / M["m00"]) * ratio)
			cY = int((M["m01"] / M["m00"]) * ratio)

			# detect the shape of the contour and label the color
			shape = sd.detect(c)
			color = cl.label(lab, c)

			response['shape']=shape
			response['color']=color

			# multiply the contour (x, y)-coordinates by the resize ratio,
			# then draw the contours and the name of the shape and labeled
			# color on the image
			c = c.astype(np.float_)
			c *= ratio
			c = c.astype(np.int32)
			text = "{} {}".format(color, shape)
			cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
			cv2.putText(image, text, (cX, cY),
				cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

		return response
Example #7
def draw_str(dst, target, s, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, color=(255, 255, 255), bgcolor=(0, 0, 0), thickness=1):
    x, y = target
    if imutils.is_cv3():
        line_type = cv2.LINE_AA
    elif imutils.is_cv2():
        line_type = cv2.cv.CV_AA
    cv2.putText(dst, s, (x + 1, y + 1), fontFace, fontScale,
                bgcolor, thickness=thickness + 1, lineType=line_type)
    cv2.putText(dst, s, (x, y), fontFace, fontScale, color,
                thickness=thickness, lineType=line_type)
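draw_str draws the string twice, offset by one pixel, so the bgcolor pass forms an outline that keeps the text legible on any background. A minimal usage sketch (the frame contents are illustrative):

frame = np.zeros((240, 320, 3), np.uint8)
draw_str(frame, (10, 20), "status: OK")
cv2.imshow("frame", frame)
cv2.waitKey(0)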
Example #8
def trapezoid_mask(trapezoid, blur_radius, threshold, path, out_dir = './tmp/'):
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    result = {}
    result["S"] = 0
    result["L"] = 0
    result["R"] = 0
#    failures = {}
#    failures["S"] = []
#    failures["L"] = []
#    failures["R"] = []
    #print("[TrapezoidMask] Processing " + path)
    img = cv2.imread(path,0)
    # Darken
    img = adjust_gamma(img)
    # Mask
    mask = np.zeros(img.shape, dtype=np.uint8)
    roi_corners = np.array([trapezoid], dtype=np.int32)
    channel_count = 1  # grayscale here; would be 3 or 4 for a color image
    ignore_mask_color = (255,)*channel_count
    cv2.fillPoly(mask, roi_corners, ignore_mask_color)
    masked_image = cv2.bitwise_and(img, mask)
    basename = os.path.basename(path)
    #cv2.imwrite(out_dir + basename.replace('.png', '.masked.png'), masked_image)
    # Find connected regions
    path = out_dir + basename.replace('.png', '.connected.png')
    labeled, n = connected_regions(masked_image, path, blur_radius, threshold)
    # Find extreme points on contour
    thresh = cv2.threshold(masked_image, 45, 255, cv2.THRESH_BINARY)[1]
    #thresh = cv2.erode(thresh, None, iterations=2)
    #thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    # Classify
    if len(cnts) > 0:
        c = max(cnts, key=cv2.contourArea)
        extLeft = tuple(c[c[:, :, 0].argmin()][0])
        extRight = tuple(c[c[:, :, 0].argmax()][0])
        if extLeft[1] < extRight[1]: # Left curve
            # print("[TrapezoidMask] " + str(n) + " parts and " + str(extLeft) + " < " + str(extRight) + ": this is a LEFT curve")
            result["L"] += 1
            #if basename[0] != 'L': failures["L"].append(basename)
        else: # Right curve
            #print("[TrapezoidMask] " + str(n) + " parts and " + str(extLeft) + " < " + str(extRight) + ": this is a RIGHT curve")
            result["R"] += 1
            #if basename[0] != 'R': failures["R"].append(basename)
    else:
        #print("[TrapezoidMask] " + str(n) + " parts and no contours: this is a STRAIGHT lane")
        result["S"] += 1
        #if basename[0] != 'S': failures["S"].append(basename)
    #print("[TrapezoidMask] Intermediate result: " + str(result))
    return result #, failures
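A hypothetical driver for trapezoid_mask (the corner coordinates, parameter values, and directory are illustrative; adjust_gamma and connected_regions are assumed to be defined elsewhere in this module):

import os

trapezoid = [(60, 200), (260, 200), (220, 120), (100, 120)]  # (x, y) corners
totals = {"S": 0, "L": 0, "R": 0}
for name in os.listdir("./frames"):
    result = trapezoid_mask(trapezoid, blur_radius=5, threshold=60,
                            path=os.path.join("./frames", name))
    for k in totals:
        totals[k] += result[k]
print(totals)  # accumulated straight/left/right counts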
Example #9
	def __init__(self, accumWeight=0.5, deltaThresh=5, minArea=5000):
		# determine the OpenCV version, followed by storing the
		# frame accumulation weight, the fixed threshold for
		# the delta image, and finally the minimum area required
		# for "motion" to be reported
		self.isv2 = imutils.is_cv2()
		self.accumWeight = accumWeight
		self.deltaThresh = deltaThresh
		self.minArea = minArea

		# initialize the average image for motion detection
		self.avg = None
Example #10
def remove_blobs(full_image, resized_image, gray, ratio, show_plots=False):
    if show_plots:
        cv2.imshow("Thresh2", gray)
        cv2.waitKey(0)

    cnts = cv2.findContours(gray.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]

    # loop over the contours
    
    hsv = cv2.cvtColor(resized_image, cv2.COLOR_BGR2HSV)
Example #11
def shapes2(request):

	# load the image and resize it to a smaller factor so that
	# the shapes can be approximated better
	image = cv2.imread('LcKe78Bca.png')
	resized = imutils.resize(image, width=300)
	ratio = image.shape[0] / float(resized.shape[0])

	# convert the resized image to grayscale, blur it slightly,
	# and threshold it
	gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
	blurred = cv2.GaussianBlur(gray, (5, 5), 0)
	thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]

	# find contours in the thresholded image and initialize the
	# shape detector
	cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
		cv2.CHAIN_APPROX_SIMPLE)
	cnts = cnts[0] if imutils.is_cv2() else cnts[1]
	sd = ShapeDetector()

	# loop over the contours
	for c in cnts:
		# compute the center of the contour, then detect the name of the
		# shape using only the contour
		M = cv2.moments(c)

		cX = int((M["m10"] / M["m00"]) * ratio)
		cY = int((M["m01"] / M["m00"]) * ratio)
		shape = sd.detect(c)

		# multiply the contour (x, y)-coordinates by the resize ratio,
		# then draw the contours and the name of the shape on the image
		c = c.astype(np.float_)
		c *= ratio
		c = c.astype(np.int32)
		cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
		cv2.putText(image, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX,
			0.5, (60, 255, 123), 2)

	result = 'result.png'
	cv2.imwrite(result, image)
	result_data = open(result, "rb").read()
	return HttpResponse(result_data, content_type="image/png")
Example #12
def get_contours(image, to_hsv=None, thresh=None, smooth=None, edge=None):
  """
  A function to get contours from a BGR image using opencv with optional thresholding
  and smoothing first.

  Args:
       to_hsv: if this value is set to True, the BGR image will be transformed
               to hsv colorspace. Default leaves it as BGR colorspace.

       thresh: If thresh is given, a threshold will be applied to the image. 
               thresh should be a 2 x 3 numpy array, where the first row is the
               lower bound and the second row is the upper bound of the threshold. 

       smooth: If smooth is given, cv2's bilateralFilter will be applied to 
               the image. smooth should be a 3-vector, indicating the arguments 
               for the smoothing function. 

       edge:   If edge is given, cv2's canny edge detector will be used on the 
               image. It should consist of a 2-vector signifying the lower bound 
               and upper bound. 
  """
  if to_hsv is True:
    image=cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
  
  if thresh is not None:
    if np.shape(thresh)!=(2, 3):
      raise TypeError('thresh must be a 2x3 array.')
    else:
      mask=cv2.inRange(image, thresh[0, :].astype(int), thresh[1, :].astype(int))

  if smooth is not None:
    if 'mask' in locals():
      mask=cv2.bilateralFilter(np.uint8(mask), smooth[0], smooth[1], smooth[2])
    else:
      mask=cv2.bilateralFilter(np.uint8(image), smooth[0], smooth[1], smooth[2])
  
  if edge is not None:
    if 'mask' in locals():
      mask=cv2.Canny(mask, edge[0], edge[1])
    else:
      mask=cv2.Canny(image, edge[0], edge[1])

  # fall back to the raw image if no preprocessing step produced a mask
  if 'mask' not in locals():
    mask = image

  cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
  return cnts[0] if imutils.is_cv2() else cnts[1]
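A minimal usage sketch of the HSV-threshold path (the green bounds and file name are illustrative):

image = cv2.imread("example.png")
green_bounds = np.array([[40, 50, 50],     # lower HSV bound
                         [80, 255, 255]])  # upper HSV bound
cnts = get_contours(image, to_hsv=True, thresh=green_bounds)
print("found {} contours".format(len(cnts)))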
Example #13
def returnPoints(image):
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
	cl1 = clahe.apply(gray)
	
	thresh = cv2.Canny(cl1, 50, 100)
	thresh = cv2.dilate(thresh, None, iterations=3)
	thresh = cv2.erode(thresh, None, iterations=3)
	cv2.bitwise_not(thresh, thresh)

	cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
		cv2.CHAIN_APPROX_SIMPLE)
	cnts = cnts[0] if imutils.is_cv2() else cnts[1]
	(cnts, _) = contours.sort_contours(cnts)
	pixelsPerMetric = None
	for c in cnts:
		# if the contour is not sufficiently large, ignore it
		if (cv2.contourArea(c) < 300 or cv2.contourArea(c) > 400):
			continue
 
		# compute the rotated bounding box of the contour
		'''orig = image.copy()
		box = cv2.minAreaRect(c)
		box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
		box = np.array(box, dtype="int")

		box = perspective.order_points(box)
		cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)
 
		# loop over the original points and draw them
		for (x, y) in box:
			cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)'''
		M = cv2.moments(c)
		cX = int(M["m10"] / M["m00"]) if (M["m00"] != 0) else int(M["m10"])
		cY = int(M["m01"] / M["m00"]) if (M["m00"] != 0) else int(M["m01"])
		cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
		
		#return orig
	return image
Example #14
def countcontours(img):
    nc = 0
    path = "img/" + img + ".png"
    image = cv2.imread(path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cv2.imshow("Image", gray)    
    cv2.waitKey(0) 
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    cv2.imshow("Image", blurred)    
    cv2.waitKey(0) 
    thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
    cv2.imshow("Image", thresh)    
    cv2.waitKey(0) 
    
            
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]      
    sd = ShapeDetector()
    
    for c in cnts:
    # compute the center of the contour
        if cv2.contourArea(c)<800:
            continue
        else:
            M = cv2.moments(c)
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            shape = sd.detect(c) 
            #if(shape=="square" or shape=="rectangle"):
            if True:  # shape filter disabled
                cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
                cv2.circle(image, (cX, cY), 7, (255, 255, 255), -1)
                cv2.putText(image, "center", (cX - 20, cY - 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                nc=nc+1 
            cv2.imshow("Image", image)
            cv2.waitKey(0) 
    return nc
Example #15
def findcolors(nc,im):
    cont=0
    image = cv2.imread(im)
   
    blurred = cv2.GaussianBlur(image, (5, 5), 0)
    gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
    lab = cv2.cvtColor(blurred, cv2.COLOR_BGR2LAB)
    thresh = cv2.threshold(gray, 60, 255, cv2.THRESH_BINARY)[1]
        
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    
    cl = ColorLabeler()    
    sd = ShapeDetector()
    
    for c in cnts:
        if cv2.contourArea(c)< 800:
            continue
        else:
            M = cv2.moments(c)
            cX = int((M["m10"] / M["m00"]))
            cY = int((M["m01"] / M["m00"]))
            shape = sd.detect(c)
            color = cl.label(lab, c)
            print(shape)
            print(color)
           
            if (shape=="square" or shape=="rectangle"):
                if (color==nc):
                    text = "{}".format(color)
                    cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
                    cv2.putText(image, text, (cX, cY),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                    cont=cont+1
                    cv2.imshow("Image", image)
                    cv2.waitKey(0)
    return cont
Example #16
def get_cropped_img(image):
    grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    grey = cv2.GaussianBlur(grey, (7, 7), 0)

    edged = cv2.Canny(grey, 50, 100)
    edged = cv2.dilate(edged, None, iterations=1)
    edged = cv2.erode(edged, None, iterations=1)

    # cv2.imshow('edged', edged)
    # find contours in the edge map
    cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]

    largest_contour_area = 0
    largest_contour = None
    for (i, c) in enumerate(cnts):
        # if the contour is not sufficiently large, ignore it
        if cv2.contourArea(c) < 100:
            continue
        if cv2.contourArea(c) > largest_contour_area:
            largest_contour_area = cv2.contourArea(c)
            largest_contour = c

        # bounding_box = cv2.boundingRect(c)
        # x1, y1, h, w = bounding_box
        # cv2.drawContours(image, c, -1, (0, 255, 0), 20)
        # # cv2.rectangle(image, (x1, y1), (x1 + h, y1 + w), (0, 255, 0), 2)
        # # image = image[bounding_box[2]:bounding_box[0], bounding_box[3]:bounding_box[1]]

    if largest_contour is not None:
        bounding_box = cv2.boundingRect(largest_contour)
        x1, y1, w, h = bounding_box
        # cv2.drawContours(image, bounding_box, -1, (0, 255, 0), -1)
        cv2.rectangle(image, (x1, y1), (x1 + w, y1 + h), (255, 255, 0), 5)
        image = image[y1:y1 + h, x1:x1 + w]
        return image
    return None
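A minimal usage sketch (file names are illustrative); note the function also draws the bounding rectangle on the input image as a side effect:

image = cv2.imread("example.jpg")
cropped = get_cropped_img(image)
if cropped is not None:
    cv2.imwrite("cropped.jpg", cropped)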
Example #17
    def scaleAndCrop(self, img, out_width):
        # scale
        resized = imutils.resize(img, width=out_width)

        # crop where?
        grey = cv2.cvtColor(resized,cv2.COLOR_BGR2GRAY)
        [ret, thresh] = cv2.threshold(grey,10,255,cv2.THRESH_BINARY)
        #TODO: solve issue: x,y,w,h = cv2.boundingRect(cnt), TypeError: points is not a numpy array, neither a scalar

        # check to see if we are using OpenCV 2.X
        if imutils.is_cv2():
            [contours, hierarchy] = cv2.findContours(thresh, 1, 2)
            cnt = contours[0]

        # check to see if we are using OpenCV 3
        elif imutils.is_cv3():
            (_, cnts, _) = cv2.findContours(thresh, 1, 2)
            cnt = cnts[0]

        #x,y,w,h = cv2.boundingRect(cnt)
        [x, y, w, h] = cv2.boundingRect(cnt)
        crop = resized[y:y + h, x:x + w]
        return crop
Example #18
File: shot.py  Project: karlisle/kharl
    def preProcess(self, frame):

        resized = imutils.resize(frame, width=300)
        ratio = frame.shape[0] / float(resized.shape[0])
        print(ratio)

        gray_frame = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray_frame, (5, 5), 0)
        #bilateral = cv2.bilateralFilter(gray_frame, 9, 90, 90)
        thresh = cv2.threshold(blurred, 70, 255, cv2.THRESH_BINARY)[1]
        #canny  = cv2.Canny(blurred, 50, 200)

        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]



        for c in cnts:
            # compute the center of the contour in full-frame coordinates
            M = cv2.moments(c)
            if M["m00"] == 0:
                continue
            cX = int((M["m10"] / M["m00"]) * ratio)
            cY = int((M["m01"] / M["m00"]) * ratio)
            shape = self.detector(c)

            # scale the contour back up to the original frame size
            c = c.astype("float")
            c *= ratio
            c = c.astype("int")

            cv2.drawContours(frame, [c], -1, (0, 255, 0), 2)
            cv2.putText(frame, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

        cv2.imshow("Frame", frame)
        cv2.imshow("Thresh", thresh)
Example #19
def shapes(request):

	# load the image, convert it to grayscale, blur it slightly,
	# and threshold it
	image = cv2.imread('octa.png')
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	blurred = cv2.GaussianBlur(gray, (5, 5), 0)
	thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
	result = 'result.png'

	# find contours in the thresholded image
	cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
		cv2.CHAIN_APPROX_SIMPLE)
	cnts = cnts[0] if imutils.is_cv2() else cnts[1]
	arr = []
	# loop over the contours
	for c in cnts:
		# compute the center of the contour
		M = cv2.moments(c)
		if (M["m00"] == 0):
			M["m00"]=1
		cX = int(M["m10"] / M["m00"])
		cY = int(M["m01"] / M["m00"])

		# draw the contour and center of the shape on the image

		cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
		cv2.circle(image, (cX, cY), 7, (0, 255, 0), -1)
		cv2.putText(image, "center", (cX - 20, cY - 20),
			cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)



	cv2.imwrite(result, image)
	result_data = open(result, "rb").read()
	return HttpResponse(result_data, content_type="image/png")
Example #20
    def remove_blobs(self, full_image, resized_image, gray, ratio, show_plots=False):
        if show_plots:
            cv2.imshow("Thresh2", gray)
            cv2.waitKey(0)

        cnts = cv2.findContours(gray.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]

        # loop over the contours
        
        hsv = cv2.cvtColor(resized_image, cv2.COLOR_BGR2HSV)
        # minv = 1000
        # for c in cnts:
        #     mask = np.zeros(gray.shape,np.uint8)
        #     cv2.drawContours(mask,[c],0,255,-1)
        #     mean_val = np.array(cv2.mean(hsv,mask = mask))
        #     minv = min(mean_val[2], minv)
        # print minv

        for c in cnts:
            mask = np.zeros(gray.shape,np.uint8)
            cv2.drawContours(mask,[c],0,255,-1)
            mean_val = np.array(cv2.mean(hsv,mask = mask))
            if np.max(mean_val) < 100 and cv2.contourArea(c) > 5000:
                continue
            else:
                print(cv2.contourArea(c))
                if cv2.contourArea(c) < 5000:
                    cv2.drawContours(gray, [c], -1, (0, 0, 0), -1)
                # else:
                    # pass
        if show_plots:
            cv2.imshow("a", gray)
            cv2.waitKey(0)
        return gray
Example #21
def calculate_point(filename, name, ANSWER_KEY = []):
    # # construct the argument parse and parse the arguments
    # ap = argparse.ArgumentParser()
    # ap.add_argument("-i", "--image", required=True,
    #     help="path to the input image")
    # args = vars(ap.parse_args())

    # define the answer key which maps the question number
    # to the correct answer


    # load the image, convert it to grayscale, blur it
    # slightly, then find edges
    image = cv2.imread('{}/media/{}'.format(settings.BASE_DIR, filename))
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(blurred, 75, 200)
    print('step 1')
    # find contours in the edge map, then initialize
    # the contour that corresponds to the document
    cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST,
    	cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    docCnt = None
    print('step 2')
    # ensure that at least one contour was found
    found = False
    if len(cnts) > 0:
        # sort the contours according to their size in
        # descending order
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
        print('step 3')
        # loop over the sorted contours
        for c in cnts:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.02 * peri, True)

            # if our approximated contour has four points,
            # then we can assume we have found the paper
            if len(approx) == 4:
                docCnt = approx
                if found:
                    color = (0, 255, 0)
                    cv2.drawContours(image, [docCnt], -1, color, 3)
                    break
                found = True

    cv2.imwrite('media/mask/{}-image.png'.format(name), image)
    print('step 4')
    # apply a four point perspective transform to both the
    # original image and grayscale image to obtain a top-down
    # birds eye view of the paper


    paper = four_point_transform(image, docCnt.reshape(4, 2))
    warped = four_point_transform(gray, docCnt.reshape(4, 2))
    correct_paper = four_point_transform(image, docCnt.reshape(4, 2))

    paper = cv2.resize(paper, (1024, 961))
    warped = cv2.resize(warped, (1024, 961))
    correct_paper = cv2.resize(correct_paper, (1024, 961))

    print('step 5')
    # apply Otsu's thresholding method to binarize the warped
    # piece of paper
    thresh = cv2.threshold(warped, 0, 255,
    	cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]

    # store mask and bird view paper
    cv2.imwrite('media/mask/{}-bird.png'.format(name), paper)
    cv2.imwrite('media/mask/{}-mask.png'.format(name), thresh)


    # find contours in the thresholded image, then initialize
    # the list of contours that correspond to questions
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
    	cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    questionCnts = []
    print('step 6')
    # loop over the contours
    count = 0
    color = (0, 255, 0)
    for c in cnts:
        # compute the bounding box of the contour, then use the
        # bounding box to derive the aspect ratio
        (x, y, w, h) = cv2.boundingRect(c)
        ar = w / float(h)

        # in order to label the contour as a question, the region
        # should be sufficiently wide, sufficiently tall, and
        # have an aspect ratio approximately equal to 1
        if w >= 15 and h >= 15 and w <= 40 and h <= 40:
            # if count >= 480 and count < 500:
            cv2.drawContours(paper, [c], -1, color, 3)
            questionCnts.append(Contour(y, c))
            count = count + 1

    def sort_by_x(contour):
        return contour.x

    questionCnts = sorted(questionCnts, key=sort_by_x, reverse=False)
    questionCnts = [c.cnt for c in questionCnts]
    cv2.imwrite('media/mask/{}-circle.png'.format(name), paper)
    #
    # # sort the question contours top-to-bottom, then initialize
    # # the total number of correct answers
    # # questionCnts = contours.sort_contours(questionCnts,
    # # 	method="top-to-bottom")[0]
    print('step 7')
    correct = 0
    print(len(questionCnts))
    if len(questionCnts) != 500:
        raise ValueError('Wrong number of answers.')
    # each question has 5 possible answers, so loop over the
    # questions in batches of 5

    for (q, i) in enumerate(np.arange(0, len(ANSWER_KEY), 1)):
        # draw the outline of the correct answer on the test;
        # sort the contours for the current question from
        # left to right, then initialize the index of the
        # bubbled answer
        start = matched[str(i + 1)]['start']
        no = i + 1
        rownum = 0
        startInRow = 0
        if no >= 1 and no <= 25:
            rownum = start
            startInRow = 0
        elif no >= 26 and no <= 50:
            rownum = start - 5
            startInRow = 5
        elif no >= 51 and no <= 75:
            rownum = start - 10
            startInRow = 10
        elif no >= 76 and no <= 100:
            rownum = start - 15
            startInRow = 15
        cnts = []
        rowCnts = questionCnts[rownum: rownum + 20]
        for c in rowCnts:
            (x, y, w, h) = cv2.boundingRect(c)
            cnts.append(Contour(x, c))

        cnts = sorted(cnts, key=sort_by_x, reverse=False)
        cnts = cnts[startInRow: startInRow + 5]
        cnts = [c.cnt for c in cnts]
        # for c in cnts:
        #     cv2.drawContours(paper, [c], -1, color, 3)
        # cv2.imwrite('media/mask/{}-answer.png'.format(name), paper)

        bubbled = None
        color = (0, 0, 255)
        count_chosen = 0
        chosen_list = []
        # loop over the sorted contours
        for (j, c) in enumerate(cnts):
            # construct a mask that reveals only the current
            # "bubble" for the question
            mask = np.zeros(thresh.shape, dtype="uint8")
            cv2.drawContours(mask, [c], -1, 255, -1)

            # apply the mask to the thresholded image, then
            # count the number of non-zero pixels in the
            # bubble area
            mask = cv2.bitwise_and(thresh, thresh, mask=mask)
            total = cv2.countNonZero(mask)
            print('========')
            print((total / len(mask)) * 100)

            # if the current total has a larger number of total
            # non-zero pixels, then we are examining the currently
            # bubbled-in answer
            if (total / len(mask)) * 100 > PERCENT_CHOSEN:
                count_chosen = count_chosen + 1
                chosen_list.append(j + 1)
            if bubbled is None or total > bubbled[0]:
                bubbled = (total, j + 1)

        # initialize the contour color and the index of the
        # *correct* answer
        k = ANSWER_KEY[q]['correct']

        color = (0, 0, 255)
        print('-----------')
        if k == str(bubbled[1]) and count_chosen == 1:
            color = (0, 255, 0)
            correct += 1
        if count_chosen == 0:
            pass
        elif count_chosen == 1:
            cv2.drawContours(correct_paper, [cnts[bubbled[1] - 1]], -1, color, 3)
        elif count_chosen > 1:
            for cir in chosen_list:
                cv2.drawContours(correct_paper, [cnts[cir - 1]], -1, color, 3)
    cv2.imwrite('media/mask/{}-answer.png'.format(name), correct_paper)
    print('step 8')
    return correct
Example #22
def readAndGenerateInstanceSegmentation(outputPath, transformers, imagePath):
    image = cv2.imread(imagePath)
    name = imagePath.split(os.path.sep)[-1]
    labelPath = '/'.join(imagePath.split(
        os.path.sep)[:-1]) + "/" + name[0:name.rfind(".")] + ".txt"
    maskLabels = []
    (h, w) = image.shape[:2]
    with open(labelPath) as f:
        data = json.load(f)

    annotations = data['annotations']

    for annotation in annotations:
        mask = np.zeros((h, w), dtype="uint8")  # NumPy shape is (rows, cols)
        key = list(annotation)[0]

        if key == 'rectangle':
            x = annotation['rectangle']['x']
            y = annotation['rectangle']['y']
            width = annotation['rectangle']['width']
            height = annotation['rectangle']['height']
            label = annotation['rectangle']['label']
            cv2.rectangle(mask, (x, y), (x + width, y + height), 255, -1)
            maskLabels.append((mask, label))

        if key == 'circle':
            x = annotation['circle']['x']
            y = annotation['circle']['y']
            diameter = annotation['circle']['radius']
            label = annotation['circle']['label']
            cv2.circle(mask, (int(x + diameter / 2), int(y + diameter / 2)),
                       int(diameter / 2), 255, -1)
            maskLabels.append((mask, label))

        if key == 'polygon':
            xpoints = annotation['polygon']['xpoints']
            ypoints = annotation['polygon']['ypoints']
            label = annotation['polygon']['label']
            pts = np.array([[x, y] for x, y in zip(xpoints, ypoints)],
                           np.int32)
            pts = pts.reshape((-1, 1, 2))
            cv2.fillPoly(mask, [pts], 255)

            maskLabels.append((mask, label))

    for (j, transformer) in enumerate(transformers):
        (newimage, newmasklabels) = transformer.transform(image, maskLabels)

        cv2.imwrite(outputPath + str(j) + "_" + name, newimage)
        newSegmentations = []
        for (mask, label) in newmasklabels:
            cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = cnts[0] if imutils.is_cv2() else cnts[1]
            segmentation = [[x[0][0], x[0][1]] for x in cnts[0]]
            # Closing the polygon
            segmentation.append(segmentation[0])

            newSegmentations.append((label, segmentation))
        data = {}
        data['name'] = name
        data['width'] = w
        data['height'] = h
        data['annotations'] = []
        for (l, segmentation) in newSegmentations:
            xpoints = [int(x[0]) for x in segmentation]
            ypoints = [int(x[1]) for x in segmentation]
            data['annotations'].append({
                'polygon': {
                    'ypoints': ypoints,
                    'xpoints': xpoints,
                    'label': l
                }
            })

        with open(outputPath + str(j) + "_" + name[0:name.rfind(".")] + ".txt",
                  'w') as outfile:
            json.dump(data, outfile)
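The label files read above are JSON. Reconstructed from the keys the parser accesses (all values are illustrative), a payload looks roughly like:

example_label = {
    "annotations": [
        {"rectangle": {"x": 10, "y": 20, "width": 30, "height": 40,
                       "label": "car"}},
        {"circle": {"x": 50, "y": 60, "radius": 12, "label": "ball"}},
        {"polygon": {"xpoints": [1, 5, 9], "ypoints": [2, 8, 3],
                     "label": "road"}},
    ]
}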
Example #23
# loads image
# grayscale for fun
# blur to filter out noise
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5,5), 0)          # a 5x5 kernel is applied
# threshold turns image into binary
# if intensity of a pixel is larger than 60,
# replace the value with 255, which is white in OpenCV
intensityThreshold = 60
thresh = cv2.threshold(blurred, intensityThreshold, 255, cv2.THRESH_BINARY)[1]

# now find the contours
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]     # the contour list's position in the return tuple changed in OpenCV 3

# defines what shape an object is
sd = ShapeDetector()

print "// Escape key = Exit; Any other key = Continue //"

# process on each contour
for cnt in cnts:
    # object center
    M = cv2.moments(cnt)
    cntX = int(M["m10"] / M["m00"])
    cntY = int(M["m01"] / M["m00"])
    # detect shape type
    shapeName = sd.detect(cnt)
    if shapeName is None: shapeName = ""
Example #24
# import the necessary packages
from __future__ import print_function
import imutils
import cv2

# print the current OpenCV version on your system
print("Your OpenCV version: {}".format(cv2.__version__))

# check to see if you are using OpenCV 2.X
print("Are you using OpenCV 2.X? {}".format(imutils.is_cv2()))

# check to see if you are using OpenCV 3.X
print("Are you using OpenCV 3.X? {}".format(imutils.is_cv3()))
Example #25
def init_props(cap, config):
    '''
    set and store the real camera properties
    '''

    prop = None
    if imutils.is_cv3():
        if hasattr(cv2, "CAP_PROP_FPS"):
            prop = cv2.CAP_PROP_FPS
    elif imutils.is_cv2():
        if hasattr(cv2.cv, "CV_CAP_PROP_FPS"):
            prop = cv2.cv.CV_CAP_PROP_FPS

    if prop is None:
        print("OpenCV not compiled with camera framerate property!")
    else:
        try:
            cap.set(prop, config.camera.fps)
            config.camera.fps = cap.get(prop)
        except:
            print(
                "Unable to set framerate to {:.1f}!".format(config.camera.fps))
            config.camera.fps = cap.get(prop)
        finally:
            print("--  framerate: {}".format(config.camera.fps))

    # set the resolution as specified at the top
    prop = [None, None]
    if imutils.is_cv3():
        if hasattr(cv2, "CAP_PROP_FRAME_WIDTH"):
            prop = [cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT]
    elif imutils.is_cv2():
        if hasattr(cv2.cv, "CV_CAP_PROP_FRAME_WIDTH"):
            prop = [cv2.cv.CV_CAP_PROP_FRAME_WIDTH,
                    cv2.cv.CV_CAP_PROP_FRAME_HEIGHT]

    if any(p is None for p in prop):
        print("OpenCV not compiled with camera resolution properties!")
    else:
        try:
            for i in range(2):  # set both width (index 0) and height (index 1)
                cap.set(prop[i], config.camera.res[i])
                config.camera.res[i] = int(cap.get(prop[i]))
        except:
            print(
                "Unable to set resolution to {}x{}!".format(*config.camera.res))
            config.camera.res = [int(cap.get(p)) for p in prop]
        finally:
            print("--  resolution: {}x{}".format(*config.camera.res))

    # try to find the fourcc of the attached camera
    prop = None
    if imutils.is_cv3():
        if hasattr(cv2, "CAP_PROP_FOURCC"):
            prop = cv2.CAP_PROP_FOURCC
    elif imutils.is_cv2():
        if hasattr(cv2.cv, "CV_CAP_PROP_FOURCC"):
            prop = cv2.cv.CV_CAP_PROP_FOURCC

    if prop is None:
        print("OpenCV not compiled with fourcc property!")
    else:
        try:
            config.camera.fourcc = cap.get(prop)
        except:
            print("Unable to read camera's codec!")
        finally:
            print("--  fourcc: {}".format(config.camera.fourcc))
Example #26
        # set initial positions
        cv2.setTrackbarPos(
            'Motion Hist.', config.window.name, config.computing.bg_sub_hist)
        cv2.setTrackbarPos(
            'Motion Thresh.', config.window.name, int(config.computing.bg_sub_thresh))

    cv2.createTrackbar('Processing Width', config.window.name,
                       100, config.camera.res[0], update_processing_width)
    cv2.setTrackbarPos(
        'Processing Width', config.window.name, config.computing.width)

# background subtractor
if imutils.is_cv3():
    fgbg = cv2.createBackgroundSubtractorMOG2(
        config.computing.bg_sub_hist, config.computing.bg_sub_thresh)
elif imutils.is_cv2():
    fgbg = cv2.BackgroundSubtractorMOG2(
        config.computing.bg_sub_hist, config.computing.bg_sub_thresh)


def get_motions(f, fMask, thickness=1, color=(170, 170, 170)):
    '''
    Iterates over the contours in a mask and draws a bounding box
    around the ones that encompass an area greater than a threshold.
    This returns an image of just the drawn boxes (black background),
    and also an array of the box points.
    '''
    rects_mot = []
    f_rects = np.zeros(f.shape, np.uint8)
    # get contours
    if imutils.is_cv3():
Example #27
# ap.add_argument("-t", "--target", required=True, help="path to the target image")
args = vars(ap.parse_args())

# Check that the files exist.
if not os.path.isfile(args['source']):
    print('Source file', args['source'], 'does not exist.')
    sys.exit()

# Source: read, gray, blur, thresh, contour.
image = cv2.imread(args['source'])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]

cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]

# Get the shape of the object and create an empty image.
height, width, dimensions = image.shape
Color = np.zeros((height, width, 3), 'uint8')

# Re-create the image (proof of concept).
for vv in range(0, height):
    for hh in range(0, width):
        if cv2.pointPolygonTest(cnts[0], (hh, vv), False) != -1:
            Color[vv][hh] = [255, 255, 0]

# Show the comparison.
plt.subplot(3, 2, 1)
plt.imshow(image, 'gray')
plt.subplot(3, 2, 2)
Example #28
# perform edge detection, then perform a dilation + erosion to
# close gaps in between object edges
edged = cv2.Canny(img, 20,
                  50)  # Adjust these parameters according to your image
dilate = cv2.dilate(edged, None, iterations=1)
# We don't perform erosion here; whether it is needed depends entirely on the image
# erode = cv2.erode(dilate, None, iterations=1)

# make an empty mask
mask = np.ones(img.shape[:2], dtype="uint8") * 255

# find contours
cnts = cv2.findContours(dilate.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]

orig = img.copy()
for c in cnts:
    # if the contour is not sufficiently large, ignore it
    if cv2.contourArea(c) < 200:
        cv2.drawContours(mask, [c], -1, 0, -1)

    x, y, w, h = cv2.boundingRect(c)

    # Filter and remove more contours according to your need
    if (w > h):
        cv2.drawContours(mask, [c], -1, 0, -1)

# Remove those ignored contours
newimage = cv2.bitwise_and(dilate.copy(), dilate.copy(), mask=mask)
Example #29
def Begin(Type):
    avg = None
    count = 0
    flag = 0
    cnt = 0
    first = None
    detectflag = None
    while True:
        if Type == 0:
            frame = vs.read()
        elif Type == 1:
            if not vs.more():
                break
            frame = vs.read()
        if first is None:
            frame = imutils.resize(frame, width=500)
            r = selectRoi(frame)
            frameroi = frame[int(r[1]):int(r[1] + r[3]),
                             int(r[0]):int(r[0] + r[2])]
            grayf = cv2.cvtColor(frameroi, cv2.COLOR_BGR2GRAY)
            grayfb = cv2.GaussianBlur(grayf, (21, 21), 0)
            first = 1
        text = "Unoccupied"
        frame = imutils.resize(frame, width=500)
        frameroi = frame[int(r[1]):int(r[1] + r[3]),
                         int(r[0]):int(r[0] + r[2])]
        gray = cv2.cvtColor(frameroi, cv2.COLOR_BGR2GRAY)
        grayb = cv2.GaussianBlur(gray, (5, 5), 0)
        if avg is None:
            print("[INFO] starting background model...")
            avg = grayb.copy().astype("float")
            continue
        cv2.accumulateWeighted(grayb, avg, 0.6)
        frameDelta = cv2.absdiff(grayb, cv2.convertScaleAbs(avg))
        thresh = cv2.threshold(frameDelta, 10, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        for c in cnts:
            (x, y, w, h) = cv2.boundingRect(c)
            if w < 20 or h < 20:
                continue
            cv2.rectangle(frameroi, (x, y), (x + w, y + h), (0, 255, 0), 2)
            text = "Occupied"

        if len(cnts) != 0:
            cnt += 1
            flag = 1
            count = 0

        if len(cnts) == 0:
            count += 1
        if count >= 100:
            if flag == 0:
                grayf = gray
                cnt = 0
            else:
                flag = 0
                if cnt >= 50:
                    grayfb = cv2.GaussianBlur(grayf, (21, 21), 0)
                    grayb = cv2.GaussianBlur(gray, (21, 21), 0)
                    frameD = cv2.absdiff(grayb, grayfb)
                    threshd = cv2.threshold(frameD, 35, 255,
                                            cv2.THRESH_BINARY)[1]
                    threshd = cv2.dilate(threshd, None, iterations=2)

                    #cv2.imshow("thred",threshd)
                    cntsd = cv2.findContours(threshd.copy(), cv2.RETR_EXTERNAL,
                                             cv2.CHAIN_APPROX_SIMPLE)
                    cntsd = cntsd[0] if imutils.is_cv2() else cntsd[1]
                    fram = frameroi.copy()
                    #cv2.drawContours(fram, cntsd, -1, (0,255,0), 3)
                    for c in cntsd:
                        (x, y, w, h) = cv2.boundingRect(c)
                        area = w * h
                        if area < 81:
                            continue

                        cv2.rectangle(fram, (x, y), (x + w, y + h),
                                      (0, 255, 0), 2)
                        image1 = grayf[int(y):int(y + h), int(x):int(x + w)]
                        image2 = gray[int(y):int(y + h), int(x):int(x + w)]
                        s = ssim(image1, image2)
                        if s < 0.4:
                            if detectflag is None:
                                print("object is detected")
                                detectflag = 1
                            cv2.rectangle(frameroi, (x, y), (x + w, y + h),
                                          (0, 255, 0), 2)
                            continue
                        if area > 2500:

                            c = compare(image1, image2)
                            if c >= 3:
                                if detectflag is None:
                                    print("object is detected")
                                    detectflag = 1
                                cv2.rectangle(frameroi, (x, y), (x + w, y + h),
                                              (0, 255, 0), 2)
                        #cv2.imshow("Security", frame
                    detectflag = None

                    cv2.imshow("Security", frameroi)
                    #cv2.imshow("Sec", fram)
                    cv2.waitKey(1)

        cv2.imshow("Security Feed", frameroi)
        cv2.imshow("thresh", thresh)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
Example #30
def detect_color_and_shape(image_file):
    '''
    Finds the shape and color of each tile in the puzzle.
    Returns these values in a 2d list.
    '''

    new_graph = [[[] for _ in range(9)] for _ in range(9)]

    #my_image = 'bc_myedit.jpg'
    image = cv2.imread(image_file)

    # blur the image slightly, then convert it to both
    # grayscale and the L*a*b* color space

    blurred = cv2.GaussianBlur(image, (5, 5), 0)
    gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
    lab = cv2.cvtColor(blurred, cv2.COLOR_BGR2LAB)
    thresh = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)[1]
    binary = cv2.bitwise_not(thresh)

    cv2.imshow('image', binary)
    cv2.waitKey(0)

    # find contours in the thresholded image
    cnts = cv2.findContours(binary.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]

    # make copy of image
    image_copy = np.copy(image)

    # find contours of large enough area
    min_cnt_area = 750
    large_cnts = [cnt for cnt in cnts if cv2.contourArea(cnt) > min_cnt_area]

    # draw contours
    cv2.drawContours(image_copy, large_cnts, -1, (255, 0, 0))

    # init
    sd = ShapeDetector()
    cl = ColorLabeler()

    # loop over cnts

    for c in large_cnts:

        # compute the center of the contour

        M = cv2.moments(c)
        try:
            cX = int((M['m10'] / M['m00']))
            cY = int((M['m01'] / M['m00']))
        except ZeroDivisionError:
            cX = 0
            cY = 0

        # detect the shape of the contour and label the color

        shape = sd.detect(c)
        color = cl.label(lab, c)

        # draw the contours and the name of the shape and labeled
        # color on the image

        # the image was not resized, so the contour needs no rescaling here
        txt_color = color
        txt_shape = shape
        #text = '{} {}'.format(color, shape)
        cv2.drawContours(image, [c], -1, (0, 0, 0), 2)
        cv2.putText(image, txt_color, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
        cv2.putText(image, txt_shape, (cX, cY - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)

        y_graph = int(round(cX / 80))
        x_graph = int(round(cY / 80))
        new_graph[x_graph][y_graph] = [color, shape]

    # show image

    cv2.imshow('image', image)
    cv2.waitKey(0)

    return new_graph
Example #31
def CamMovement(qc):
    global Cam_Run
    firstFrame = None
    hits = 0  # counter for us to cycle over files

    CHNG_THRESH = 65  # Change Threshold used to be 25

    HR_Cam = qc.get()

    cc = qc.get()
    i = 0

    vs = []  # init VS array

    while i < cam_count:
        vs.append(cv2.VideoCapture(i))
        if not vs[i].isOpened():
            print('Could not open webcam #' + str(i) + ' \n')
            vs[i].release()
            vs.pop(i)
            i = i - 1
            break
        i = i + 1

    signal.signal(signal.SIGTERM, sigterm_cam)
    signal.signal(signal.SIGINT, sigterm_cam)

    while Cam_Run:

        # grab the current frame and initialize the occupied/unoccupied
        retval, frame = vs[HR_Cam].read()
        text = "Unoccupied"

        # if the frame could not be grabbed, then we have reached the end
        # of the video

        if frame is None:
            break

        # resize the frame, convert it to grayscale, and blur it

        frame = imutils.resize(frame, width=500)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        # if the first frame is None, initialize it
        if firstFrame is None:
            firstFrame = gray
            continue

        # compute the absolute difference between the current frame and
        # first frame
        frameDelta = cv2.absdiff(firstFrame, gray)
        thresh = cv2.threshold(frameDelta, CHNG_THRESH, 255,
                               cv2.THRESH_BINARY)[1]

        # dilate the thresholded image to fill in holes, then find contours
        # on thresholded image
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]

        # loop over the contours
        Caption = "Empty"
        for c in cnts:

            # compute the bounding box for the contour, draw it on the frame,
            # and update the text
            (x, y, w, h) = cv2.boundingRect(c)
            if (w > 10) and (h > 10):  # trying to eliminate tiny changes
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                text = "Occupied"
                Caption = text + ' !'

        # draw the text and timestamp on the frame
        cv2.putText(frame, "Room Status: {}".format(Caption), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        cv2.putText(frame,
                    datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                    (0, 0, 255), 1)

        # show the frame
        if text == "Occupied":
            cv2.imwrite(str(hits) + '_Security' + '.png', frame)
            for x in range(cc):
                if x != HR_Cam:
                    retval, frame = vs[x].read()
                    Caption = str(x)
                    cv2.putText(frame, "Camera: {}".format(Caption), (10, 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                    cv2.putText(
                        frame,
                        datetime.datetime.now().strftime(
                            "%A %d %B %Y %I:%M:%S%p"),
                        (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
                        0.35, (0, 0, 255), 1)
                    cv2.imwrite(
                        str(hits) + '_Cam' + str(x) + '_' + '.png', frame)
            hits = hits + 1
            qc.put([True, datetime.datetime.now()])

            sleep(4)  # give it 4 secs before you grab more frames

        if hits > 19:  # recycle videos so as not to eat space
            hits = 0
    for v in vs:
        v.release()
Example #32
def main():
    """Annotate images
    """
    # construct the argument parse and parse the arguments
    args = argparse.ArgumentParser()
    args.add_argument("-i",
                      "--input",
                      required=True,
                      help="path to input directory of images")
    args.add_argument("-a",
                      "--annot",
                      required=True,
                      help="path to output directory of annotations")
    args = vars(args.parse_args())

    # grab the image paths then initialize the dictionary of character counts
    image_paths = list(paths.list_images(args["input"]))
    counts = {}

    # loop over the image paths
    for (i, image_path) in enumerate(image_paths):
        # display an update to the user
        print("[INFO] processing image {}/{}".format(i + 1, len(image_paths)))
        try:
            # load the image and convert it to grayscale, then pad the image to ensure
            # digits caught on the border of the image are retained
            image = cv2.imread(image_path)
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            gray = cv2.copyMakeBorder(gray, 8, 8, 8, 8, cv2.BORDER_REPLICATE)
            # threshold the image to reveal the digits
            thresh = cv2.threshold(gray, 0, 255,
                                   cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
            # find contours in the image, keeping only the four largest ones
            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = cnts[0] if imutils.is_cv2() else cnts[1]
            cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:4]
            # loop over the contours
            for contour in cnts:
                # compute the bounding box for the contour then extract the digit
                (x, y, w, h) = cv2.boundingRect(contour)
                roi = gray[y - 5:y + h + 5, x - 5:x + w + 5]
                # display the character, making it large enough for us to see,
                # then wait for a keypress
                cv2.imshow("ROI", imutils.resize(roi, width=28))
                key = cv2.waitKey(0)
                # if the '`' key is pressed, then ignore the character
                if key == ord("`"):
                    print("[INFO] ignoring character")
                    continue
                # grab the key that was pressed and construct the path the output directory
                key = chr(key).upper()
                dir_path = os.path.sep.join([args["annot"], key])
                # if the output directory does not exist, create it
                if not os.path.exists(dir_path):
                    os.makedirs(dir_path)
                # write the labeled character to file
                count = counts.get(key, 1)
                path = os.path.sep.join(
                    [dir_path, "{}.png".format(str(count).zfill(6))])
                cv2.imwrite(path, roi)
                # increment the count for the current key
                counts[key] = count + 1
        # the user is trying to ctrl-c out of the script, so break from the
        # loop (you still need to press a key in the active window to trigger this)
        except KeyboardInterrupt:
            print("[INFO] manually leaving script")
            break
        # an unknown error has occurred for this particular image
        except BaseException:  # pylint: disable=broad-except
            print("[INFO] skipping image...")
Example #33
0
def doc_scan(image_path):
    """
    Converts an image of a document into a top-down scan of it.

    This function uses the imutils, numpy, and opencv python library to
    take a path to a given image and automatically detect the corners of
    the document within the picture. The picture is then reshaped and
    transformed using the four_point_transform function of transform.py
    to an image of just the document. This is adapted from the
    www.pyimagesearch.com python scanner tutorial.

    The document within the picture must be a rectangle with 4 corners,
    otherwise contour selection will return an error.

    :param image_path:
        Path to the image to be scanned.
    :return: final_image -
        Top-down, full document version of the original image as an
        opencv image object.
    """

    # STEP 1 : Edge Detection
    # Load the image, get ratio of old to new height, clone & resize it.
    image = cv2.imread(image_path)
    ratio = image.shape[0] / NEW_IMG_HEIGHT
    original = image.copy()
    image = imutils.resize(image, height=int(NEW_IMG_HEIGHT))

    # convert to gray-scale, blur, and find edges.
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray_img = cv2.GaussianBlur(gray_img, (GAUSS_KERN, GAUSS_KERN), SIGMA_X)
    img_edges = cv2.Canny(gray_img, THRESHOLD_BOT, THRESHOLD_TOP)

    # STEP 2 : Detecting Contours
    # Find the contours in the edge image, keep the largest contours, &
    # initialize the screen contour.
    contours = cv2.findContours(img_edges.copy(), cv2.RETR_LIST,
                                cv2.CHAIN_APPROX_SIMPLE)
    contours = contours[0] if imutils.is_cv2() else contours[1]
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:5]

    # Loop over contours
    for cont in contours:
        # Approximate the contour
        perimeter = cv2.arcLength(cont, True)
        approx_cont = cv2.approxPolyDP(cont, 0.02 * perimeter, True)

        # If the approximated contour has 4 pts, assume it's the screen.
        if len(approx_cont) == 4:
            screen_cont = approx_cont
            break
    else:
        raise Exception("Document not found, please try again")

    # STEP 3 : Apply Perspective Transform & Threshold
    # Apply four_point_transform to get top-down view of original image.
    top_down = four_point_transform(original,
                                    screen_cont.reshape(4, 2) * ratio)

    # Convert top-down image to gray-scale, and threshold it to give the
    # effect of a 'black and white' copy.
    top_down = cv2.cvtColor(top_down, cv2.COLOR_BGR2GRAY)
    threshold = threshold_local(top_down,
                                BLOCK_SIZE,
                                offset=OFFSET,
                                method=METHOD)
    top_down = (top_down > threshold).astype("uint8") * 255

    final_image = imutils.resize(top_down, height=FIN_IMG_HEIGHT)

    return final_image
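
# A minimal usage sketch of doc_scan; "receipt.jpg" is a hypothetical input
# path, and the module-level constants above (NEW_IMG_HEIGHT, GAUSS_KERN,
# etc.) are assumed to be configured:
if __name__ == "__main__":
    scanned = doc_scan("receipt.jpg")
    cv2.imwrite("receipt_scanned.png", scanned)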
Example #34
0
def main():

    cap = cv2.VideoCapture(vid_path)
    status1, previous_frame = cap.read()
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    copy_frame = cv2.cvtColor(previous_frame, cv2.COLOR_BGR2GRAY)
    fgbg = cv2.createBackgroundSubtractorMOG2()
    hsv = np.zeros_like(previous_frame)
    hsv[..., 1] = 255
    t = 20
    dc = 6
    start = 0
    i = 0

    while (i < total_frames - 1):
        ret, frame = cap.read()
        i = i + 1

        frame1 = frame.copy()
        current_frame = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
        current_frame = cv2.GaussianBlur(current_frame, (var_blur, var_blur),
                                         0)

        # optical Flow
        flow = cv2.calcOpticalFlowFarneback(copy_frame, current_frame, None,
                                            0.5, 3, 15, 3, 5, 1.2, 0)
        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv[..., 0] = ang * 180 / np.pi / 2  # flow direction -> hue
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255,
                                    cv2.NORM_MINMAX)  # flow magnitude -> value
        bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        grayscaled = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)

        retval2, binary_image2 = cv2.threshold(
            grayscaled, 125, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        # Background Subtraction
        binary_image3 = fgbg.apply(current_frame)

        # combination of two methods
        final_binary = cv2.bitwise_and(binary_image2, binary_image3)

        lab_val = 255
        n_labels, img_labeled, lab_stats, _ = \
            cv2.connectedComponentsWithStats(final_binary, connectivity=8,
                                             ltype=cv2.CV_32S)

        if lab_stats[1:, 4].size > 2:
            start = 1
            dc = dc + 1

            if dc > 6:
                dc = 0
                re = lab_stats[1:, 4].argsort()[-3:][::-1] + 1

                largest_mask = np.zeros(final_binary.shape, dtype=np.uint8)
                largest_mask[img_labeled == re[0]] = lab_val
                cnts1 = cv2.findContours(largest_mask.copy(),
                                         cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)
                cnts1 = cnts1[0] if imutils.is_cv2() else cnts1[1]

            cv2.putText(frame, 'Breathing', (10, 40), cv2.FONT_HERSHEY_SIMPLEX,
                        1, (0, 255, 255), 1, cv2.LINE_AA)
            cv2.imshow('Frame', frame)
        else:
            t = t + 1
            if t > 40:
                if lab_stats[1:, 4].size > 0 and start == 1:
                    t = 0
                cv2.putText(frame, 'Not Breathing', (10, 40),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1,
                            cv2.LINE_AA)
                cv2.imshow('Frame', frame)
            else:
                cv2.putText(frame, 'Breathing', (10, 40),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1,
                            cv2.LINE_AA)
                cv2.imshow('Frame', frame)
            previous_frame = current_frame
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
    cap.release()
    cv2.destroyAllWindows()
Example #35
0
# load our YOLO object detector trained on COCO dataset (80 classes)
# and determine only the *output* layer names that we need from YOLO
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]

# initialize the video stream, pointer to output video file, and
# frame dimensions
vs = cv2.VideoCapture(args["input"])
writer = None
(W, H) = (None, None)

# try to determine the total number of frames in the video file
try:
    prop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() \
     else cv2.CAP_PROP_FRAME_COUNT
    total = int(vs.get(prop))
    print("[INFO] {} total frames in video".format(total))

# an error occurred while trying to determine the total
# number of frames in the video file
except:
    print("[INFO] could not determine # of frames in video")
    print("[INFO] no approx. completion time can be provided")
    total = -1

# loop over frames from the video file stream
while True:
    # read the next frame from the file
    (grabbed, frame) = vs.read()
def detectShapes(mask, image):
    X = 20
    Y = 20
    line = 0
    rectangle = 0
    circle = 0
    triangle = 0
    kernelopen = np.ones((5, 5), np.uint8)
    kernelClose = np.ones((3, 3), np.uint8)
    opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernelopen)
    closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernelClose)
    R = closing.shape[0]
    C = closing.shape[1]
    temp = closing[0:R, 0:C]
    edges = DoG(temp)

    ratio = image.shape[0] / float(image.shape[0])
    mx = np.amax(edges)
    mn = np.amin(edges)
    img = image
    thresh_val = np.average(np.arange(mn, mx))
    _, thresh = cv2.threshold(edges, thresh_val, 255, cv2.THRESH_BINARY)
    cv2.imshow("edge", thresh)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    #cnts=sorted(cnts, key=cv2.contourArea, reverse=True)

    for c in cnts:
        # condition to eliminate small curves
        if cv2.arcLength(c, True) > 50:
            shape = detect(c, img, cnts, thresh)
            if (shape == "triangle"):
                triangle += 1
                cv2.drawContours(img, c, -1, (0, 255, 0), 3)

                #triangle = shapes_count_safety_check(triangle)
            elif (shape == "circle"):
                circle += 1
                cv2.drawContours(img, c, -1, (189, 255, 30), 3)
                #circle = shapes_count_safety_check(circle)
            elif (shape == "rectangle"):
                rectangle += 1
                cv2.drawContours(img, c, -1, (0, 0, 0), 3)

                #rectangle = shapes_count_safety_check(rectangle)
            elif (shape == "line"):
                line += 1
                cv2.drawContours(img, c, -1, (0, 0, 255), 3)

                #line = shapes_count_safety_check(line)

    cv2.putText(image, str(line), (X, Y), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (0, 0, 255), 2)
    cv2.line(image, (X + 30, Y - 2), (X + 70, Y - 2), (0, 0, 255), 4)

    cv2.putText(image, str(circle), (X, Y + 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                (0, 0, 255), 2)
    cv2.circle(image, (X + 40, Y + 35), 15, (0, 0, 255), -1)

    cv2.putText(image, str(rectangle), (X, Y + 80), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (0, 0, 255), 2)
    cv2.rectangle(image, (X + 25, Y + 60), (X + 55, Y + 90), (0, 0, 255), -1)

    pt1 = (X + 40, Y + 100)
    pt2 = (X + 20, Y + 130)
    pt3 = (X + 60, Y + 130)
    triangle_cnt = np.array([pt1, pt2, pt3])
    cv2.drawContours(image, [triangle_cnt], 0, (0, 0, 255), -1)
    cv2.putText(image,
                str(triangle) + " ", (X, Y + 120), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (0, 0, 255), 2)
Example #37
0
img2 = cv2.dilate(grayC, kernel)
img3 = cv2.erode(img2, kernel)
# img3 = cv2.erode(img2, kernel)

# kernel = np.ones((5, 5), np.uint8)
# img4 = cv2.erode(img3, kernel)
img4 = cv2.dilate(img3, kernel)
img5 = cv2.dilate(img4, kernel)
# kernel = np.ones((7, 7), np.uint8)
img6 = cv2.erode(img5, kernel)

thresh_new = cv2.threshold(img4, 0, 255,
                           cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
cnts = cv2.findContours(thresh_new.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]

time_3 = datetime.datetime.now()

max_size = 0
max_list = [0, 0, 0, 0]

# paint each large bounding box into a blank mask, then find contours on the
# mask so that overlapping boxes merge into single regions
my_array = np.zeros((480, 640), dtype=np.uint8)
for c in cnts:
    (x, y, w, h) = cv2.boundingRect(c)
    if w < 50 or h < 50:
        continue
    my_array[y:y + h, x:x + w] = 1

new_cnts = cv2.findContours(my_array.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
Example #38
0
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)

# initialize the distance colors and reference object
(cnts, _) = contours.sort_contours(cnts)
colors = ((0, 0, 255), (240, 0, 159), (0, 165, 255), (255, 255, 0),
          (255, 0, 255))
refObj = None

# loop over the contours individually
for c in cnts:
    if cv2.contourArea(c) < 100:
        continue
    box = cv2.minAreaRect(c)
    box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
    box = np.array(box, dtype="int")
    box = perspective.order_points(box)
    cX = np.average(box[:, 0])
    cY = np.average(box[:, 1])
    if refObj is None:
        (tl, tr, br, bl) = box
        (tlblX, tlblY) = midpoint(tl, bl)
        (trbrX, trbrY) = midpoint(tr, br)
        D = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
        refObj = (box, (cX, cY), D / 70)
        continue
    orig = image.copy()
    cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)
    cv2.drawContours(orig, [refObj[0].astype("int")], -1, (0, 255, 0), 2)
    refCoords = np.vstack([refObj[0], refObj[1]])
Example #39
0
File: CV.py Project: wyf0912/BOE-westsouth
    def run(self, result):
        '''Takes one parameter, result, used to pass back the result
        (cx, cy, flagLightFinded, cnt); cnt = (cnt + 1) % 1000, so cnt can be
        used to check whether the result has been updated'''
        with picamera.PiCamera() as camera:
            camera.resolution = (240, 160)
            camera.framerate = 30
            camera.iso = 400
            camera.awb_mode = 'off'
            camera.awb_gains = 1
            camera.shutter_speed = 4000
            # camera.start_recording('test.h264')
            stream = PiRGBArray(camera, size=(240, 160))
            timer = threading.Timer(1, self.showfps)
            timer.start()
            inf = 666666666
            cnt = 0
            for frame in camera.capture_continuous(stream,
                                                   format="bgr",
                                                   use_video_port=True):
                cx = 0
                cy = 0
                src = frame.array
                # cv2.imshow('Capture',src)
                # cv2.imwrite('test.jpg',src)
                hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)
                # grayscaled = cv2.cvtColor(src,cv2.COLOR_BGR2GRAY)
                mask = cv2.inRange(hsv, self.args_dict['lower_red'],
                                   self.args_dict['upper_red'])
                # mask_2 = cv2.inRange(hsv, lower_red_1, upper_red_1)
                # mask = cv2.bitwise_or(mask_1,mask_2)
                # blur = cv2.GaussianBlur(imageG,(5,5),0)
                # retval,fixed=cv2.threshold(imageG,150,255,cv2.THRESH_BINARY)
                kernel = np.ones((15, 15), np.uint8)
                # mask = cv2.erode(mask,kernel)
                mask = cv2.dilate(mask, kernel, 1)

                # Find the middle of the light blob
                cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)
                cnts = cnts[0] if imutils.is_cv2() else cnts[1]
                distance = [None] * len(cnts)
                i = 0
                for c in cnts:
                    M = cv2.moments(c)
                    cX = int(M["m10"] / (M["m00"] + 1))
                    cY = int(M["m01"] / (M["m00"] + 1))

                    if [cX, cY] != [0, 0]:
                        distance[i] = (cX - 104) * (cX - 104) + (160 - cY) * (
                            160 - cY)
                    else:
                        distance[i] = inf
                    # advance the index for every contour, not just empty ones
                    i = i + 1
                flagLightFinded = 0
                if distance != []:
                    M = cv2.moments(cnts[distance.index(min(distance))])
                    cx = round((M["m10"] / (M["m00"] + 1)) / 240.0 * 128)
                    cy = round((M["m01"] / (M["m00"] + 1)) / 160.0 * 128)
                    flagLightFinded = 1

                    cv2.rectangle(src, (cX - 40, cY - 30), (cX + 40, cY + 30),
                                  (0, 255, 0), 4)
                # str = "A%d,%d,%dFF " % (cx, cy, flagLightFinded);
                self.result = [cx, cy, flagLightFinded, cnt]
                cnt = (cnt + 1) % 1000
                # ser.write('A100,100,1FF ')
                # print(str)

                if self.gui.imshow_flag:
                    cv2.imshow('Mask', mask)
                    # cv2.imwrite('test_1.jpg',src);
                    cv2.imshow('Image', src)

                self.fps = self.fps + 1
                stream.truncate(0)

                cv2.waitKey(1)
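
# A hedged sketch of how a consumer of the (cx, cy, flagLightFinded, cnt)
# protocol described in the docstring might poll for updates; `tracker` is a
# hypothetical instance of this class running in another thread:
last_cnt = -1
cx, cy, flagLightFinded, cnt = tracker.result
if cnt != last_cnt:  # cnt changed, so a new result is available
    last_cnt = cnt
    if flagLightFinded:
        print("light centre at ({}, {})".format(cx, cy))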
ap = argparse.ArgumentParser()
ap.add_argument("-n", "--new", type=int, default=-1,
	help="whether or not the new order points should should be used")
args = vars(ap.parse_args())

image = cv2.imread("Res/X_windows.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)

edged = cv2.Canny(gray, 50, 100)
edged = cv2.dilate(edged, None, iterations=1)
edged = cv2.erode(edged, None, iterations=1)

cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
	cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]

(cnts, _) = contours.sort_contours(cnts)
colors = ((0, 0, 255), (240, 0, 159), (255, 0, 0), (255, 255, 0))

for (i, c) in enumerate(cnts):

	if cv2.contourArea(c) < 100:
		continue

	box = cv2.minAreaRect(c)
	box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
	box = np.array(box, dtype="int")
	cv2.drawContours(image, [box], -1, (0, 255, 0), 2)

	# show the original coordinates
Example #42
0
    def process_image(self, frame):
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.medianBlur(gray, 5)

        # Get Parameters from ROS interface
        MinThreshold = rospy.get_param('~MinThreshold')
        MaxThreshold = rospy.get_param('~MaxThreshold')
        MinAreaThreshold = rospy.get_param('~MinAreaThreshold')
        ReferenceMeasure = rospy.get_param('~ReferenceMeasure')

        # perform edge detection, then perform a dilation + erosion to
        # close gaps in between object edges
        edged = cv2.Canny(gray, MinThreshold, MaxThreshold)

        kernel = np.ones((3, 3), np.uint8)
        edged = cv2.dilate(edged, kernel, iterations=1)

        # find contours in the edge map
        cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]

        # sort the contours from left-to-right and initialize the
        # 'pixels per metric' calibration variable
        (cnts, _) = contours.sort_contours(cnts)

        # compute the rotated bounding box of the contour
        processed_frame = frame.copy()

        # loop over the contours individually
        for c in cnts:

            # if the contour is not sufficiently large, ignore it
            if cv2.contourArea(c) < MinAreaThreshold:
                continue

            hull = cv2.convexHull(c, returnPoints=True)

            # compute the rotated bounding box of the contour
            box = cv2.minAreaRect(hull)
            box = cv2.cv.BoxPoints(box) if imutils.is_cv2() \
                else cv2.boxPoints(box)
            box = np.array(box, dtype="int")

            # order the points in the contour such that they appear
            # in top-left, top-right, bottom-right, and bottom-left
            # order, then draw the outline of the rotated bounding

            # box
            box = perspective.order_points(box)
            cv2.drawContours(processed_frame, [
                box.astype("int")], -1, (0, 0, 255), 2)

            # loop over the processed_frame points and draw them
            for (x, y) in box:
                cv2.circle(processed_frame, (int(x), int(y)),
                           5, (0, 0, 255), -1)

            # unpack the ordered bounding box, then compute the midpoint
            # between the top-left and top-right coordinates, followed by
            # the midpoint between bottom-left and bottom-right coordinates
            (tl, tr, br, bl) = box
            (tltrX, tltrY) = self.midpoint(tl, tr)
            (blbrX, blbrY) = self.midpoint(bl, br)

            # compute the midpoint between the top-left and bottom-left points,
            # followed by the midpoint between the top-right and bottom-right
            (tlblX, tlblY) = self.midpoint(tl, bl)
            (trbrX, trbrY) = self.midpoint(tr, br)

            # if the contour is not sufficiently large, ignore it
            if cv2.contourArea(c) < 10000:
                # draw the midpoints on the frame
                cv2.circle(processed_frame, (int(tltrX),
                                             int(tltrY)), 5, (255, 0, 0), -1)
                cv2.circle(processed_frame, (int(blbrX),
                                             int(blbrY)), 5, (255, 0, 0), -1)
                cv2.circle(processed_frame, (int(tlblX),
                                             int(tlblY)), 5, (255, 0, 0), -1)
                cv2.circle(processed_frame, (int(trbrX),
                                             int(trbrY)), 5, (255, 0, 0), -1)

                # draw lines between the midpoints
                cv2.line(processed_frame, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),
                         (255, 0, 255), 2)
                cv2.line(processed_frame, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),
                         (255, 0, 255), 2)

            # compute the Euclidean distance between the midpoints
            dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
            dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))

            # if the pixels per metric has not been initialized, then
            # compute it as the ratio of pixels to supplied metric
            # (in this case, inches)
            if self.pixelsPerMetric is None:
                self.pixelsPerMetric = dB / ReferenceMeasure

            # compute the size of the object
            dimA = dA / self.pixelsPerMetric
            dimB = dB / self.pixelsPerMetric

            if cv2.contourArea(c) < 10000:
                # draw the object sizes on the frame
                cv2.putText(processed_frame, "{:.1f}mm".format(dimA),
                            (int(tltrX - 15), int(tltrY - 10)
                             ), cv2.FONT_HERSHEY_SIMPLEX,
                            0.70, (0, 0, 255), 2)
                cv2.putText(processed_frame, "{:.1f}mm".format(dimB),
                            (int(trbrX + 10), int(trbrY)
                             ), cv2.FONT_HERSHEY_SIMPLEX,
                            0.70, (0, 0, 255), 2)

        return processed_frame
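
    # The midpoint helper used above is not shown in this snippet; a minimal
    # sketch of what it is assumed to compute (the point halfway between two
    # (x, y) coordinates):
    @staticmethod
    def midpoint(ptA, ptB):
        # average the x and y coordinates of the two points
        return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)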
Example #43
0
def select2():  # find the answer key and its coordinates
    global answerSheet, position, answerList
    path = filedialog.askopenfilename()
    answerSheet = cv2.imread(path, 0)

    # code that finds the regions where the two images differ (not fully understood)
    (score, diff) = compare_ssim(testSheet, answerSheet, full=True)
    diff = (diff * 255).astype("uint8")
    thresh = cv2.threshold(diff, 0, 255,
                           cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]

    # split the answer sheet vertically (group contours into rows)
    x1, y1, h1 = 0, 0, 0
    num = 0
    count = 0
    startX, startY = 0, 0
    img = []
    row = []
    row.append([])
    box = []
    for c in cnts:
        (x, y, w, h) = cv2.boundingRect(c)
        box.append(c)
        if count == 0:
            row[num].append(count)
            pass
        else:
            if abs(y1 - y) < 20:
                row[num].append(count)
            else:
                num = num + 1
                row.append([])
                row[num].append(count)
        y1 = y
        count += 1

    # check whether the vertically split rows can also be split horizontally
    a, b, c, d = 0, 0, 0, 0
    for i in range(0, len(row)):
        minX = 10000
        minY = 10000
        maxX = 0
        maxY = 0

        for j in range(0, len(row[i])):
            a, b, c, d = cv2.boundingRect(box[row[i][j]])
            if minX > a:
                minX = a
            if maxY < b + d:
                maxY = b + d
            if maxX < a + c:
                maxX = a + c
            if minY > b:
                minY = b
        if abs(maxY - minY) < 5:
            pass
        else:
            img.append(
                answerSheet[minY:maxY,
                            minX:maxX])  # img == list storing the images of the final answer words
    print(len(position))
    answerList = []

    # if a folder named "answer" is created where this code runs, the answers
    # are saved inside it; TODO: check whether the dir exists and create it if not
    for i in range(0, len(img)):
        #cv2.imshow(str(i), img[i])
        # resize = cv2.resize(img[i], None, fx=2.0, fy=2.0, interpolation=cv2.INTER_CUBIC + cv2.INTER_LINEAR)
        '''gau = cv2.GaussianBlur(resize, (5, 5), 0)
		temp = cv2.addWeighted(resize, 1.5, gau, -0.5, 0)

		kernel = np.ones((2,2), np.uint8)
		er = cv2.erode(temp, kernel, iterations=1)'''

        #cv2.imshow("zzz", er)
        #tessdata_dir_config = r'--tessdata-dir "<C:\Program Files (x86)\Tesseract-OCR\tessdata>"'

        #cv2.imwrite("answer\\"+"ss"+str(i) + ".jpg", er)
        result = pytesseract.image_to_string(img[i], lang='eng')
        result = result.replace(" ", "")
        result = str(result)
        answerList.append(result)
        print(result)

    if not (os.path.isdir("answerSheet")):
        os.makedirs(os.path.join("answerSheet"))

    f = open("answerSheet/answerList.txt", "w", -1, "utf-8")
    for i in range(0, len(answerList)):
        f.write(answerList[i] + "\n")
    f.close()
Example #44
0
    def findContour(self, image):
        contour = cv2.findContours(image, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_NONE)
        contour = contour[0] if imutils.is_cv2() else contour[1]

        return contour
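
    # NOTE: the `contour[0] if imutils.is_cv2() else contour[1]` unpacking
    # used throughout these examples breaks on OpenCV 4, where findContours
    # again returns (contours, hierarchy); a version-agnostic sketch of the
    # same helper uses imutils.grab_contours (available in recent imutils
    # releases):
    def findContourAnyVersion(self, image):
        contour = cv2.findContours(image, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_NONE)
        # grab_contours selects the right tuple element for OpenCV 2/3/4
        return imutils.grab_contours(contour)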
#Hence p is a keypoint of the image (and it represents a corner in the image)

# import the necessary packages
from __future__ import print_function
import numpy as np
import cv2
import imutils

# load the image and convert it to grayscale
image = cv2.imread("./images/trex.png")
#image = cv2.imread("./images/grand_central_terminal.png")
orig = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# handle if we are detecting FAST keypoints in the image for OpenCV 2.4
if imutils.is_cv2():
    detector = cv2.FeatureDetector_create("FAST")
    kps = detector.detect(gray)
# otherwise, we are detecting FAST keypoints for OpenCV 3+
else:
    detector = cv2.FastFeatureDetector_create()
    kps = detector.detect(gray, None)
print("# of keypoints: {}".format(len(kps)))

# loop over the keypoints and draw them
for kp in kps:
    r = int(0.5 * kp.size)
    (x, y) = np.int0(kp.pt)
    cv2.circle(image, (x, y), r, (0, 255, 255), 2)
# show the image
cv2.imshow("Images", np.hstack([orig, image]))
for image_file in captcha_image_files:
    # Load the image and convert it to grayscale
    image = cv2.imread(image_file)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Add some extra padding around the image
    image = cv2.copyMakeBorder(image, 20, 20, 20, 20, cv2.BORDER_REPLICATE)

    # threshold the image (convert it to pure black and white)
    thresh = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]

    # find the contours (continuous blobs of pixels) the image
    contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Hack for compatibility with different OpenCV versions
    contours = contours[0] if imutils.is_cv2() else contours[1]

    letter_image_regions = []

    # Now we can loop through each of the four contours and extract the letter
    # inside of each one
    for contour in contours:
        # Get the rectangle that contains the contour
        (x, y, w, h) = cv2.boundingRect(contour)

        # Compare the width and height of the contour to detect letters that
        # are conjoined into one chunk
        if w / h > 1.25:
            # This contour is too wide to be a single letter!
            # Split it in half into two letter regions!
            half_width = int(w / 2)
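            # The snippet cuts off here; a hedged sketch of how this splitting
            # heuristic typically continues (two half-width regions for a
            # conjoined blob, one region otherwise):
            letter_image_regions.append((x, y, half_width, h))
            letter_image_regions.append((x + half_width, y, half_width, h))
        else:
            # this contour is a normal letter on its own
            letter_image_regions.append((x, y, w, h))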
Example #47
0
charNames = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "0",
	"T", "U", "A", "D"]
 
# load the reference MICR image from disk, convert it to grayscale,
# and threshold it, such that the digits appear as *white* on a
# *black* background
ref = cv2.imread(args["reference"])
ref = cv2.cvtColor(ref, cv2.COLOR_BGR2GRAY)
ref = imutils.resize(ref, width=400)
ref = cv2.threshold(ref, 0, 255, cv2.THRESH_BINARY_INV |
	cv2.THRESH_OTSU)[1]
# find contours in the MICR image (i.e., the outlines of the
# characters) and sort them from left to right
refCnts = cv2.findContours(ref.copy(), cv2.RETR_EXTERNAL,
	cv2.CHAIN_APPROX_SIMPLE)
refCnts = refCnts[0] if imutils.is_cv2() else refCnts[1]
refCnts = contours.sort_contours(refCnts, method="left-to-right")[0]
# extract the digits and symbols from the list of contours, then
# initialize a dictionary to map the character name to the ROI
refROIs = extract_digits_and_symbols(ref, refCnts,
	minW=10, minH=20)[0]
chars = {}
 
# loop over the reference ROIs
for (name, roi) in zip(charNames, refROIs):
	# resize the ROI to a fixed size, then update the characters
	# dictionary, mapping the character name to the ROI
	roi = cv2.resize(roi, (36, 36)) 
	chars[name] = roi
# initialize a rectangular kernel (wider than it is tall) along with
# an empty list to store the output of the check OCR
Example #48
0
def main(_argv):
    # try access to GPU
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)
    # Create Yolo model (Tiny or complete)
    if FLAGS.tiny:
        # light version (faster processing)
        yolo = YoloV3Tiny(classes=FLAGS.num_classes)
    else:
        # Deeper more robust model
        yolo = YoloV3(classes=FLAGS.num_classes)

    # Load weights from a pretrained net
    yolo.load_weights(FLAGS.weights).expect_partial()
    logging.info('weights loaded')

    # Get classes names
    class_names = [c.strip() for c in open(FLAGS.classes).readlines()]
    logging.info('classes loaded')

    #Initialize deep sort.
    deepsort = deepsort_rbc()

    times = []

    try:
        vid = cv2.VideoCapture(int(FLAGS.video))
    except:
        vid = cv2.VideoCapture(FLAGS.video)

    out = None

    if FLAGS.output:
        # by default VideoCapture returns float instead of int
        width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = int(vid.get(cv2.CAP_PROP_FPS))
        codec = cv2.VideoWriter_fourcc(*"MJPG")
        out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))

    # try to determine the total number of frames in the video file
    try:
        prop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() \
            else cv2.CAP_PROP_FRAME_COUNT
        total = int(vid.get(prop))
        logging.info("{} total frames in video".format(total))
        # print("[INFO] {} total frames in video".format(total))

    # an error occurred while trying to determine the total
    # number of frames in the video file
    except:
        logging.info("could not determine # of frames in video")
        logging.info("No approx. completion time can be provided")
        total = -1

    # number of frames counter
    cont = 0

    # Write Yolo features from each frame to `images.tfrecords`.
    #record_file = 'Test.tfrecords'
    #with tf.io.TFRecordWriter(record_file) as writer:
    while True:
        _, img = vid.read()

        if img is None:
            logging.warning("Empty Frame")
            time.sleep(0.1)
            break

        # print(cont)
        img_in = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img_in = tf.expand_dims(img_in, 0)
        img_in = transform_images(img_in, FLAGS.size)

        # use model to predict object bboxes
        t1 = time.time()
        ind, boxes, scores, classes, nums, feat = yolo(img_in)
        t2 = time.time()
        times.append(t2 - t1)
        times = times[-20:]

        # initialize our lists of detected bounding boxes, confidences,
        # and class IDs, respectively
        bboxes_p = []
        confidences = []
        classIDs = []
        feat_track = []
        scales = []
        ids = []

        # feature pre-location
        feat_all = []
        conti = 0

        for j, indi in enumerate(ind):
            if indi is not None:
                for i in indi:
                    classID = int(classes[0][int(i[4])])
                    if class_names[classID] == "car" or class_names[
                            classID] == "truck":
                        sco = np.array(scores[0][int(i[4])])
                        box_p = np.array(boxes[0][int(i[4])])
                        # logging.info('\t{}, {}, {}'.format(class_names[classID],
                        #                                    sco,box_p))

                        # Feature extraction
                        x, y = np.array(i[1:3])
                        feat_1 = feat[j][:, x, y, :][0]
                        feat_track.append(feat_1)
                        feat_all.append(
                            np.concatenate(
                                [
                                    feat_1,
                                    tf.expand_dims(classes[0][int(i[4])],
                                                   0),  # object class
                                    tf.expand_dims(i[4], 0)
                                ],
                                axis=0))  # id object in frame

                        # objects allocation
                        ids.append(conti)
                        conti += 1
                        scales.append(j)
                        confidences.append(float(sco))
                        bboxes_p.append(box_p)
                        classIDs.append(classID)

        # save output image
        # img2 = draw_output(img, (bboxes_p,confidences,classIDs,ids,
        #                       scales), class_names)
        # cv2.imshow('output', img2)
        # key = cv2.waitKey(0) & 0xFF
        # if key == ord('q'):
        #     break
        if FLAGS.save:
            # Save features to TFRecord (note: requires the TFRecordWriter
            # commented out above, otherwise `writer` is undefined)
            if feat_all:
                t_feat_all = tf.convert_to_tensor(feat_all)
                # Process the frames into `tf.Example` messages.
                tf_example = frame_example(t_feat_all, cont)
                # Write to a `.tfrecords` file.
                writer.write(tf_example.SerializeToString())

        # initialize the NMS/tracker outputs so the drawing code below does
        # not reference unbound names on frames with no detections, then
        # ensure at least one detection exists
        boxes_nms, sco_nms, classIDs_nms, ids_nms, scales_nms = [], [], [], [], []
        boxes_ds, id_ds = [], []
        if bboxes_p:

            # if cont == 46:
            #     print(cont)
            # feed deepsort with detections and features
            tracker, detections_class = deepsort.run_deep_sort(
                img, np.asarray(bboxes_p), confidences, classIDs, scales, ids,
                feat_track)
            classIDs_nms = detections_class[1]
            scales_nms = detections_class[2]
            ids_nms = detections_class[3]
            # prelocation employed detections
            boxes_nms = []
            sco_nms = []
            for det in detections_class[0]:
                # Append NMS detection boxes
                boxes_nms.append(det.to_tlbr())
                sco_nms.append(det.confidence)

            # prelocation of tracked boxes
            boxes_ds = []
            id_ds = []
            for track in tracker.tracks:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                #Append boxes and id.
                boxes_ds.append(
                    track.to_tlbr())  #Get the corrected/predicted bounding box
                id_ds.append(str(
                    track.track_id))  #Get the ID for the particular track.

        # save output image
        img = draw_YOLO(
            img, (boxes_nms, sco_nms, classIDs_nms, ids_nms, scales_nms),
            class_names)
        if boxes_ds:
            img = draw_DS(img, boxes_ds, id_ds)
        img = cv2.putText(
            img, "Time: {:.2f}ms, frame:{:d}".format(
                sum(times) / len(times) * 1000, cont), (0, 30),
            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)

        if FLAGS.output:
            out.write(img)
        cv2.imshow('output', img)
        key = cv2.waitKey(100) & 0xFF
        if key == ord('q'):
            break
        cont += 1
    cv2.destroyAllWindows()
Example #49
0
def select3():  # find the students' answers, compare with the answer key, grade and print
    global studentSheet
    studentAnswer = []
    path = filedialog.askopenfilename()
    studentSheet = cv2.imread(path, 0)
    if not (os.path.isdir("answer")):
        os.makedirs(os.path.join("answer"))
    '''for i in range(0,len(position)):
		studentAnswer.append(studentSheet[position[i][0]:position[i][1],position[i][2]:position[i][3]])
		cv2.imwrite("answer\\"+str(i)+".jpg",studentAnswer[i])'''

    #cv2.imshow("test",testSheet)
    #cv2.imshow("answer",answerSheet)
    #cv2.imshow("student",studentSheet)

    # code that finds the regions where the two images differ (not fully understood)
    (score, diff) = compare_ssim(testSheet, studentSheet, full=True)
    diff = (diff * 255).astype("uint8")
    thresh = cv2.threshold(diff, 0, 255,
                           cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]

    # split the answer sheet vertically (group contours into rows)
    x1, y1, h1 = 0, 0, 0
    num = 0
    count = 0
    startX, startY = 0, 0
    img2 = []
    row2 = []
    row2.append([])
    box2 = []
    for c in cnts:
        (x, y, w, h) = cv2.boundingRect(c)
        box2.append(c)
        if count == 0:
            row2[num].append(count)
            pass
        else:
            if abs(y1 - y) < 20:
                row2[num].append(count)
            else:
                num = num + 1
                row2.append([])
                row2[num].append(count)
        y1 = y
        count += 1

    zz = cv2.imread(path, 1)
    # check whether the vertically split rows can also be split horizontally
    a, b, c, d = 0, 0, 0, 0
    for i in range(0, len(row2)):
        minX = 10000
        minY = 10000
        maxX = 0
        maxY = 0

        for j in range(0, len(row2[i])):
            a, b, c, d = cv2.boundingRect(box2[row2[i][j]])
            if minX > a:
                minX = a
            if maxY < b + d:
                maxY = b + d
            if maxX < a + c:
                maxX = a + c
            if minY > b:
                minY = b
        if abs(maxY - minY) < 5:
            pass
        else:
            img2.append(
                studentSheet[minY:maxY,
                             minX:maxX])  # img2 == list storing the images of the final answer words
            cv2.rectangle(zz, (minX, minY), (maxX, maxY), (0, 0, 255), 3)
            pos = []
            pos.append(minY)
            pos.append(maxY)
            pos.append(minX)
            pos.append(maxX)
            position.append(pos)
    cv2.imwrite("myanswe.jpg", zz)
    for i in range(0, len(img2)):
        cv2.imwrite("/Users/hcy/Desktop/GP/answer/" + "" + str(i) + ".jpg",
                    img2[i])
    os.chdir("/Users/hcy/Desktop/GP/src/")
    os.system('python main.py'
              )  # running main.py processes every folder in answerImage and saves the answers to a txt file

    # blank 처리 - len(answerList) - studentAnswerList
    #r = open('C:\\Users\yea\.spyder-py3\\answerSheet\\answerwordLists.txt','rt')
    with codecs.open('/Users/hcy/Desktop/GP/src/answerwordLists.txt',
                     'r') as r:
        while (1):
            line = r.readline()
            try:
                escape = line.index('\n')
            except:
                escape = len(line)

            if line:
                studentAnswer.append(line[0:escape].replace(" ", ""))
            else:
                break
    r.close()
    answerList1 = []
    with codecs.open('/Users/hcy/Desktop/GP/src/answerList.txt',
                     'r',
                     encoding='utf-8') as g:
        while (1):
            line = g.readline()
            try:
                escape = line.index('\r\n')
            except:
                escape = len(line)

            if line:
                answerList1.append(line[0:escape].replace(" ", ""))
            else:
                break
    g.close()

    print(answerList1)
    print(studentAnswer)

    score = len(studentAnswer)
    correctNum = np.zeros(len(answerList1))
    '''for i in range(0,len(studentAnswer)):
		correct = 0
		for j in range(0,len(answerList)):              # answerList is stored in order, so grade by index j
			if(studentAnswer[i] == answerList[j]):
				correct = 1
				correctNum[j]=1
				break
		if correct==0:
			print(studentAnswer[i])
			score-=1'''
    for item in studentAnswer:
        correct = 0
        for j in range(0, len(answerList1)):
            if eq(item, answerList1[j]):
                correct = 1
                correctNum[j] = 1
        if correct == 0:
            print(item)
            score -= 1
    print(score)
    print(correctNum)
    color = cv2.imread(path)
    for i in range(0, len(correctNum)):
        if (correctNum[i] == 1):
            print("correct!")
            #print(img2[i][0],img2[i][1])
            cv2.circle(color, (int((position[i][3] + position[i][2]) / 2),
                               int((position[i][0] + position[i][1]) / 2)), 30,
                       (0, 0, 255), 5)
            #cv2.circle(studentSheet,(int(x+w/2),int(y-h/2)),30,(0,0,255),-1)
        else:
            cv2.putText(color, " / ",
                        (int((position[i][3] + position[i][2]) / 2) - 70,
                         int((position[i][0] + position[i][1]) / 2) + 35),
                        cv2.FONT_HERSHEY_PLAIN, 5, (0, 0, 255), 5, cv2.LINE_AA)

    cv2.putText(color,
                str(score) + " / " + str(len(correctNum)), (1070, 1950),
                cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 0, 255), 5, cv2.LINE_AA)
    color = cv2.resize(color, (850, 850))
    cv2.imshow("Result", color)
    cv2.imwrite("Result.jpg", color)
    # cv2.circle(img,(447,63), 63, (0,0,255), -1)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #50
0
def main():

    cap = cv2.VideoCapture(vid_path)
    status1, previous_frame = cap.read()
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    copy_frame = cv2.cvtColor(previous_frame, cv2.COLOR_BGR2GRAY)
    fgbg = cv2.createBackgroundSubtractorMOG2()
    hsv = np.zeros_like(previous_frame)
    hsv[..., 1] = 255
    t = 20
    red = 30
    check_red = 1
    start = 0
    radiuce_up_limit = 60
    radiuce_low_limit = 30
    i = 0

    while (i < total_frames - 1):
        ret, frame = cap.read()
        i = i + 1

        frame1 = frame.copy()
        current_frame = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
        current_frame = cv2.GaussianBlur(current_frame, (var_blur, var_blur),
                                         0)

        # frame differening
        frame_diff = cv2.absdiff(current_frame, copy_frame)

        ret, binary_image1 = cv2.threshold(frame_diff, 3, 255,
                                           cv2.THRESH_BINARY)

        # Background Subtraction
        binary_image3 = fgbg.apply(current_frame)

        # combination of two methods
        final_binary = cv2.bitwise_and(binary_image3, binary_image1)

        lab_val = 255
        n_labels, img_labeled, lab_stats, _ = \
            cv2.connectedComponentsWithStats(final_binary, connectivity=8,
                                             ltype=cv2.CV_32S)

        if check_red == 1:
            red = red + 10
            if red > radiuce_up_limit:
                check_red = 0
        else:
            red = red - 10
            if red == radiuce_low_limit:
                check_red = 1

        if lab_stats[1:, 4].size > 2:

            #        print(lab_stats[1:,4].size)
            re = lab_stats[1:, 4].argsort()[-2:][::-1] + 1

            largest_mask = np.zeros(final_binary.shape, dtype=np.uint8)
            largest_mask[img_labeled == re[0]] = lab_val
            cnts1 = cv2.findContours(largest_mask.copy(), cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_SIMPLE)
            cnts1 = cnts1[0] if imutils.is_cv2() else cnts1[1]

            largest_mask[img_labeled == re[1]] = lab_val
            cnts2 = cv2.findContours(largest_mask.copy(), cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_SIMPLE)
            cnts2 = cnts2[0] if imutils.is_cv2() else cnts2[1]

            if len(cnts2) > 1:
                X1 = cnts2[0][0]
                X2 = cnts2[1][0]

                cX1 = X1[0][0]
                cY1 = X1[0][1]
                cX2 = X2[0][0]
                cY2 = X2[0][1]

                # distance between obj1 and obj2
                dist1 = math.sqrt((cX1 - cX2)**2 + (cY1 - cY2)**2)

                if dist1 < 90:
                    cX2 = cX1
                    cY2 = cY1
                    radiuce_up_limit = 100
                else:
                    radiuce_up_limit = 60

            cv2.circle(frame, (cX1, cY1), red, (0, 255, 255), 3)
            cv2.circle(frame, (cX2, cY2), red, (0, 255, 255), 3)
            cv2.putText(frame, 'Breathing', (10, 40), cv2.FONT_HERSHEY_SIMPLEX,
                        1, (0, 255, 255), 1, cv2.LINE_AA)
            cv2.imshow('Frame', frame)
        else:
            t = t + 1
            if t > 40:
                if lab_stats[1:, 4].size > 0 and start == 1:

                    t = 0
                cv2.putText(frame, 'Not Breathing', (10, 40),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1,
                            cv2.LINE_AA)
                cv2.imshow('Frame', frame)
            else:
                cv2.circle(frame, (cX1, cY1), red, (0, 255, 255), 3)
                cv2.circle(frame, (cX2, cY2), red, (0, 255, 255), 3)
                cv2.putText(frame, 'Breathing', (10, 40),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1,
                            cv2.LINE_AA)
                cv2.imshow('Frame', frame)
            previous_frame = current_frame
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
    cap.release()
    cv2.destroyAllWindows()
Example #51
0
def segment_chars(plate_img, fixed_width):
    """
    extract Value channel from the HSV format
    of image and apply adaptive thresholding
    to reveal the characters on the license plate
    """
    V = cv2.split(cv2.cvtColor(plate_img, cv2.COLOR_BGR2HSV))[2]

    thresh = cv2.adaptiveThreshold(V, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, 11, 2)

    thresh = cv2.bitwise_not(thresh)

    # resize the license plate region to
    # a canonical size
    plate_img = imutils.resize(plate_img, width=fixed_width)
    thresh = imutils.resize(thresh, width=fixed_width)
    bgr_thresh = cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR)

    # perform a connected components analysis
    # and initialize the mask to store the locations
    # of the character candidates
    labels = measure.label(thresh, neighbors=8, background=0)

    charCandidates = np.zeros(thresh.shape, dtype='uint8')

    # loop over the unique components
    characters = []
    for label in np.unique(labels):

        # if this is the background label, ignore it
        if label == 0:
            continue
        # otherwise, construct the label mask to display
        # only connected components for the current label,
        # then find contours in the label mask
        labelMask = np.zeros(thresh.shape, dtype='uint8')
        labelMask[labels == label] = 255

        cnts = cv2.findContours(labelMask, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)

        cnts = cnts[0] if imutils.is_cv2() else cnts[1]

        # ensure at least one contour was found in the mask
        if len(cnts) > 0:

            # grab the largest contour which corresponds
            # to the component in the mask, then grab the
            # bounding box for the contour
            c = max(cnts, key=cv2.contourArea)
            (boxX, boxY, boxW, boxH) = cv2.boundingRect(c)

            # compute the aspect ratio, solidity, and
            # height ratio for the component
            aspectRatio = boxW / float(boxH)
            solidity = cv2.contourArea(c) / float(boxW * boxH)
            heightRatio = boxH / float(plate_img.shape[0])

            # determine if the aspect ratio, solidity,
            # and height of the contour pass the rules
            # tests
            keepAspectRatio = aspectRatio < 1.0
            keepSolidity = solidity > 0.15
            keepHeight = heightRatio > 0.5 and heightRatio < 0.95

            # check to see if the component passes
            # all the tests
            if keepAspectRatio and keepSolidity and keepHeight and boxW > 14:

                # compute the convex hull of the contour
                # and draw it on the character candidates
                # mask
                hull = cv2.convexHull(c)

                cv2.drawContours(charCandidates, [hull], -1, 255, -1)

    _, contours, hier = cv2.findContours(charCandidates, cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)

    if contours:
        contours = sort_cont(contours)

        # value to be added to each dimension
        # of the character
        addPixel = 4
        for c in contours:
            (x, y, w, h) = cv2.boundingRect(c)
            if y > addPixel:
                y = y - addPixel
            else:
                y = 0
            if x > addPixel:
                x = x - addPixel
            else:
                x = 0
            temp = bgr_thresh[y:y + h + (addPixel * 2),
                              x:x + w + (addPixel * 2)]

            characters.append(temp)

        return characters

    else:
        return None
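
# A hedged usage sketch; `plate_bgr` is a hypothetical BGR crop of a plate:
plate_bgr = cv2.imread("plate.jpg")
chars = segment_chars(plate_bgr, fixed_width=400)
if chars is not None:
    for (i, ch) in enumerate(chars):
        cv2.imwrite("char_{}.png".format(i), ch)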
Example #52
0
def get_images():
    if request.method == 'GET':
        return render_template('imagediff.html')

    if request.method == 'POST':
        try:
            #Receive the incoming data and split it into two image files
            imageData = json.loads(request.json)
            image1 = imageData['image1']
            image2 = imageData['image2']
        except Exception as ex:
            imageData = json.loads(request.data)
            image1 = imageData['image1']
            image2 = imageData['image2']

        try:
            #Read the image string and convert it into jpeg for the first image
            ImageALocation = os.path.join('ImageA.jpg')
            with open(ImageALocation, "wb") as newFileA:
                newFileA.write(base64.b64decode(image1))
            newFileA.close()

            #Read the image string and convert it into jpeg for the second image
            ImageBLocation = os.path.join('ImageB.jpg')
            with open(ImageBLocation, "wb") as newFileB:
                newFileB.write(base64.b64decode(image2))
            newFileB.close()

            #Call the Image Compare function
            #result = compare_images(ImageALocation,ImageBLocation)
            ###########################################################################################################################
            imageA = cv2.imread(ImageALocation, -1)
            imageB = cv2.imread(ImageBLocation, -1)
            '''imageA = cv2.imread(ImageALocation,0)
            imageB = cv2.imread(ImageBLocation,0)'''
            grayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
            grayB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)

            # compute the Structural Similarity Index (SSIM) between the two
            # images, ensuring that the difference image is returned
            (score, diff) = compare_ssim(grayA, grayB, full=True)
            diff = (diff * 255).astype("uint8")
            #print("SSIM: {}".format(score))
            strvar = format(score)
            if strvar == "1.0":
                result = strvar
            else:

                # threshold the difference image, followed by finding contours to
                # obtain the regions of the two input images that differ
                thresh = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY_INV
                                       | cv2.THRESH_OTSU)[1]
                cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)
                cnts = cnts[0] if imutils.is_cv2() else cnts[1]

                # loop over the contours
                # compute the bounding box of the contour and then draw the
                # bounding box on both input images to represent where the two
                # images differ
                for c in cnts:
                    (x, y, w, h) = cv2.boundingRect(c)
                    cv2.rectangle(imageA, (x, y), (x + w, y + h), (0, 0, 255),
                                  2)
                    cv2.rectangle(imageB, (x, y), (x + w, y + h), (0, 0, 255),
                                  2)
                    # show the output images
                    ImageBLocation = os.path.join("Modified.jpg")
                    cv2.imwrite(ImageBLocation, imageB)
                result = ImageBLocation


###############################################################################################################################

            if result == "1.0":
                return result
            else:

                #Open the result image and convert to base64 string for transmission
                with open(result, "rb") as resultFile:
                    data = base64.b64encode(resultFile.read())
                resultFile.close()

                #Package the image string in Json format and send back to caller
                jsonOut = str(data.decode())
                dump = json.dumps(jsonOut)
                load = json.loads(dump)
                return load
        except Exception as ex:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print(exc_type, fname, exc_tb.tb_lineno)
            jsonOut = {
                'ErrorMessage':
                str(ex) + str(exc_type) + str(fname) + str(exc_tb.tb_lineno)
            }
            dump = json.dumps(jsonOut)
            load = json.loads(dump)
            return jsonify(load)
Example #53
0
dim = (width, height)
image = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
marker = find_marker(image)
focalLength = (marker[1][0] * KNOWN_DISTANCE) / KNOWN_WIDTH
# print(focalLength)

# loop over the images
for imagePath in sorted(paths.list_images("images")):
    # load the image, find the marker in the image, then compute the
    # distance to the marker from the camera
    img = cv2.imread(imagePath)
    scale_percent = 30
    width = int(img.shape[1] * scale_percent / 100)
    height = int(img.shape[0] * scale_percent / 100)
    dim = (width, height)
    image = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
    marker = find_marker(image)
    inches = distance_to_camera(KNOWN_WIDTH, focalLength, marker[1][0])
    print(inches)

    # draw a bounding box around the image and display it
    box = cv2.cv.BoxPoints(marker) if imutils.is_cv2() else cv2.boxPoints(
        marker)
    box = np.int0(box)
    cv2.drawContours(image, [box], -1, (0, 255, 0), 2)
    cv2.putText(image, "%.2fft" % (inches / 12),
                (image.shape[1] - 200, image.shape[0] - 20),
                cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 255, 0), 3)
    cv2.imshow("image", image)
    cv2.waitKey(0)
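
# find_marker and distance_to_camera are not shown in this snippet; given how
# focalLength is derived above via triangle similarity, F = (P x D) / W, the
# distance helper is presumably the usual one-liner:
def distance_to_camera(knownWidth, focalLength, perWidth):
    # distance D' = (known width W x focal length F) / perceived width P
    return (knownWidth * focalLength) / perWidth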