def diffImg(t0, t1, t2):
    d1 = cv2.absdiff(t2, t1)
    d2 = cv2.absdiff(t1, t0)
    d_final = cv2.bitwise_and(d1, d2)
    d_binary = cv2.threshold(d_final, 35, 255, cv2.THRESH_BINARY)[1]
    d_blur = cv2.blur(d_binary, (15, 15))
    return d_blur
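# Not from the original source: a minimal usage sketch, assuming a webcam at
# index 0 and the diffImg defined above. Three consecutive grayscale frames are
# slid through the function and the motion mask is displayed.
import cv2

cap = cv2.VideoCapture(0)

def grab_gray(cap):
    # Read one frame and convert it to grayscale.
    _, frame = cap.read()
    return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

t0, t1, t2 = grab_gray(cap), grab_gray(cap), grab_gray(cap)
while True:
    cv2.imshow('motion', diffImg(t0, t1, t2))
    t0, t1, t2 = t1, t2, grab_gray(cap)  # slide the three-frame window
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()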
def _colour_approach(person, background):

	# splitting images into blue, green & red components (OpenCV channel order)

	person_c = [person[:,:,0], person[:,:,1], person[:,:,2]]
	background_c = [background[:,:,0], background[:,:,1], background[:,:,2]]

	# subtracting images by component

	diff_c = []

	diff_c.append(cv.absdiff(person_c[0], background_c[0]))
	diff_c.append(cv.absdiff(person_c[1], background_c[1]))
	diff_c.append(cv.absdiff(person_c[2], background_c[2]))

	# applying Gaussian blur to each component (reducing noise)

	diff_c[0] = cv.GaussianBlur(diff_c[0], (5, 5), 0)
	diff_c[1] = cv.GaussianBlur(diff_c[1], (5, 5), 0)
	diff_c[2] = cv.GaussianBlur(diff_c[2], (5, 5), 0)

	# merging components into a grey image
	# cv.add() is a saturated operation (250 + 10 = 260 => 255)

	diff = cv.add(cv.add(diff_c[0], diff_c[1]), diff_c[2])

	# applying Gaussian blur again

	diff_b = cv.GaussianBlur(diff, (11, 11), 0)

	return diff_b
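# Not from the original source: a hedged usage sketch for _colour_approach. The
# file names and the threshold level of 35 are illustrative assumptions.
import cv2 as cv

person = cv.imread('person.png')
background = cv.imread('background.png')

diff = _colour_approach(person, background)
_, mask = cv.threshold(diff, 35, 255, cv.THRESH_BINARY)
cv.imshow('foreground mask', mask)
cv.waitKey(0)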
def diffImg(t0, t1, t2):
    d1 = cv2.absdiff(t2, t1)
    d2 = cv2.absdiff(t1, t0)
    #cv2.imshow('d1', d1)
    #cv2.imshow('d2', d2)
    fixframe = cv2.bitwise_and(d1, d2)
    return fixframe
Example #4
def tantriggs(image):
    # Convert to float
    image = np.float32(image)

    image = cv2.pow(image, GAMMA)
    image = difference_of_gaussian(image)

    # mean 1
    tmp = cv2.pow(cv2.absdiff(image, 0), ALPHA)
    mean = cv2.mean(tmp)[0]
    image = cv2.divide(image, cv2.pow(mean, 1.0/ALPHA))

    # mean 2
    tmp = cv2.pow(cv2.min(cv2.absdiff(image, 0), TAU), ALPHA)
    mean = cv2.mean(tmp)[0]
    image = cv2.divide(image, cv2.pow(mean, 1.0/ALPHA))

    # tanh
    exp_x = cv2.exp(cv2.divide(image, TAU))
    exp_negx = cv2.exp(cv2.divide(-image, TAU))
    image = cv2.divide(cv2.subtract(exp_x, exp_negx), cv2.add(exp_x, exp_negx))
    image = cv2.multiply(image, TAU)

    image = cv2.normalize(image, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)

    return image
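# tantriggs relies on module-level constants and a difference_of_gaussian helper
# that the snippet does not show. A sketch, assuming the parameter values
# reported by Tan & Triggs (2010): gamma = 0.2, alpha = 0.1, tau = 10, and
# difference-of-Gaussian sigmas of 1 and 2.
import cv2
import numpy as np

GAMMA, ALPHA, TAU = 0.2, 0.1, 10.0

def difference_of_gaussian(image, sigma0=1.0, sigma1=2.0):
    # Band-pass filter: subtract a wide Gaussian blur from a narrow one.
    blur0 = cv2.GaussianBlur(image, (0, 0), sigma0)
    blur1 = cv2.GaussianBlur(image, (0, 0), sigma1)
    return cv2.subtract(blur0, blur1)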
def diff(t1, t2, t3):
    #compute diff images
    d1 = cv2.absdiff(t1, t2)
    d2 = cv2.absdiff(t2, t3)
    diffImg = cv2.bitwise_and(d1, d2)
    diffImg = cv2.cvtColor(diffImg, cv2.COLOR_RGB2GRAY)
    return diffImg
Example #6
    def get_delta(self):
        if len(self.frames) < 3:
            return None
        return cv2.bitwise_and(
            cv2.absdiff(self.frames[2], self.frames[1]),
            cv2.absdiff(self.frames[1], self.frames[0])
        )
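# get_delta reads a rolling self.frames buffer that the snippet does not show.
# A minimal host-class sketch (an assumption, not the original class); the
# get_delta above would be its third method.
import collections
import cv2

class MotionDelta(object):
    def __init__(self):
        # Keep only the three most recent grayscale frames.
        self.frames = collections.deque(maxlen=3)

    def push(self, bgr_frame):
        self.frames.append(cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2GRAY))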
def processImage(t0, t1, t2):
	d1 = cv2.absdiff(t1, t0)
	d2 = cv2.absdiff(t2, t0)
	image = cv2.bitwise_and(d1, d2)
	image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
	image = cv2.blur(image, (5,5))
	value, image = cv2.threshold(image, 25, 255, cv2.THRESH_BINARY)
	
	#element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
	#image = cv2.erode(image, element)

	image = cv2.blur(image, (5,5))
	#image = cv2.normalize(image, 0., 1.)
	
	#mean, stddev = cv2.meanStdDev(image)
	#if stddev[0] > 40:
	#	return image

	(minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(image)
	
	global cameraWidth, cameraHeight, x11
	xDiv = (cameraWidth+1) / 7.
	yDiv = (cameraHeight+1) / 7.
	if maxVal > 100:
		if x11:
			image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
			cv2.rectangle(image, (maxLoc[0]-10,maxLoc[1]-10), (maxLoc[0]+20,maxLoc[1]+20), (0,0,255), 1)
		#print xLoc, yLoc
		global leftEye, rightEye
		leftEye.setPupilSmoothed(maxLoc[0] / xDiv, maxLoc[1] / yDiv)
		rightEye.setPupilSmoothed(maxLoc[0] / xDiv, maxLoc[1] / yDiv)

	return image
Example #8
    def diffImg(self, t0, t1, t2):
        if not self.multiFrameDetection:
            return cv.absdiff(t2, t1)

        d1 = cv.absdiff(t2, t1)
        d2 = cv.absdiff(t1, t0)
        return cv.bitwise_and(d1, d2)
def frame_diff(f0, f1, f2):
    d1 = cv2.absdiff(f0, f2)
    d2 = cv2.absdiff(f1, f2)
    result = cv2.bitwise_and(d1, d2)

    ret, result = cv2.threshold(result, threshold, 255, cv2.THRESH_BINARY)
    return result
def detect(capture, prev_images):
    # Capture a new frame
    new_frame = capture.grab_frame()

    # Not enough frames: no detection, just store this one
    if len(prev_images) < 2:
        return None, None, new_frame, None

    # Everything to grayscale
    prev_images = [prev_images[1], prev_images[0], new_frame]
    prev_images = [cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
                   for prev_frame in prev_images]
    prev_frame, current_frame, next_frame = prev_images

    # Diff
    d1 = cv2.absdiff(prev_frame, next_frame)
    d2 = cv2.absdiff(next_frame, current_frame)
    motion = cv2.bitwise_and(d1, d2)

    # Threshold & erode
    cv2.threshold(motion, config.DIFF_THRESHOLD, 255, cv2.THRESH_BINARY,
                  dst=motion)
    cv2.erode(motion, kernel_ero, dst=motion)

    # Find and count changes
    number_of_changes, location, std_dev = detect_motion(motion)

    return number_of_changes, std_dev, new_frame, location
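# detect() uses kernel_ero, config.DIFF_THRESHOLD and detect_motion(), none of
# which appear in the snippet. A hedged sketch of plausible counterparts; the
# statistics computed here are illustrative, not the original implementation.
import cv2
import numpy as np

kernel_ero = np.ones((4, 4), np.uint8)  # assumed small erosion kernel

def detect_motion(motion):
    # Count changed pixels and report their mean location and spread.
    nonzero = cv2.findNonZero(motion)
    if nonzero is None:
        return 0, (0, 0), 0.0
    pts = nonzero.reshape(-1, 2)
    location = tuple(int(v) for v in pts.mean(axis=0))
    std_dev = float(pts.std(axis=0).mean())
    return len(pts), location, std_dev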
Example #11
def flame_sub(im1,im2,im3,th,blur):
    
    d1 = cv2.absdiff(im3, im2)
    d2 = cv2.absdiff(im2, im1)
    diff = cv2.bitwise_and(d1, d2)
    # return False if the difference is smaller than the threshold
    return np.sum(th - diff) > diff.size
def scan(background_gray,cap):
    for i in range(0, 3):
        ret, frame2 = cap.read()
          
    tracking = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
    
    cv2.absdiff(background_gray, tracking, tracking)
    ret,tracking = cv2.threshold(tracking,35,255,cv2.THRESH_BINARY)
    
    #post treatment
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
    erosion = cv2.erode(tracking,kernel, iterations=3)
    dilatation = cv2.dilate(erosion,kernel, iterations=10)
    
    tracking = dilatation
    #blob detection 
    tracking_c = tracking
    tracking_c, contours, hierarchy = cv2.findContours(tracking_c, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if contours:
        for cnt in contours:
            x,y,w,h = cv2.boundingRect(cnt)
            cv2.rectangle(frame2,(x,y),(x+w,y+h),(255,255,0),5)
    frameBlobs = frame2
    return tracking, contours,frameBlobs
def get_diffs_bw(detect_buffer):
  temp_buff = detect_buffer
  temp_buff_size = temp_buff.shape[0]
  diff_buff = None
  count = 0

  #Store contents of detect_buffer into a temp array
  #absdiff every third capture and throw it into a new temp array
  #Repeat until temp array has a size of 2, then return the bitwise and.
  while True:
    if temp_buff_size == 2:
      cummulativeFrames = cv2.bitwise_and(temp_buff[0], temp_buff[1])
      break
    else:
      count = 0 
      while count < (temp_buff_size / 3):

        diff1 = np.array([cv2.absdiff(temp_buff[(count*3)+2], temp_buff[(count*3)+1])])
        diff2 = np.array([cv2.absdiff(temp_buff[(count*3)+1], temp_buff[(count*3)+0])])

        if diff_buff is None:
          diff_buff = np.concatenate((diff1,diff2),axis=0)
        else:
          diff_buff = np.concatenate((diff_buff,diff1,diff2),axis=0)

        count+=1
    temp_buff = diff_buff 
    diff_buff = None
    temp_buff_size = temp_buff.shape[0]

  return cummulativeFrames
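# Not from the original source: a usage sketch for get_diffs_bw, assuming a
# default webcam and a stack of three consecutive grayscale frames, which the
# reduction loop collapses into a single AND-ed motion mask.
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
frames = []
for _ in range(3):
    _, frame = cap.read()
    frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
cap.release()

motion_mask = get_diffs_bw(np.stack(frames))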
def createOverlay(description):
    if description == "before":
        image = first_before
        projection = cv2.cvtColor(cv2.absdiff(before_projection, first_before), cv2.COLOR_BGR2GRAY)
        # invert
        projection = cv2.bitwise_not(projection)
        path = vidPath + "_2fps.AVI_"+str(before_start)+"_"+str(before_end) + "_before.jpg"

    elif description == "after":
        image = first_after
        projection = cv2.cvtColor(cv2.absdiff(after_projection, first_after), cv2.COLOR_BGR2GRAY)
        # invert
        projection = cv2.bitwise_not(projection)
        path = vidPath + "_2fps.AVI_"+str(after_start)+"_"+str(after_end) + "_after.jpg"
    

    colorTrackImg = np.zeros(image.shape, np.uint8)
    colorTrackImg.fill(255)
    
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    weight=0.5
    colorTrackImg[:,:,1] = projection[:]
    combinedTrackAndPhoto = cv2.addWeighted( colorTrackImg, weight, image, 1-weight, 0 )


    cv2.imwrite(path, projection)
    cv2.imwrite(vidPath + "_2fps.AVI_"+description+"_overlay.jpg", combinedTrackAndPhoto)
def diffImg(t0, t1, t2):
    d1 = cv2.absdiff(t2, t1)
    d2 = cv2.absdiff(t1, t0)
    result = cv2.bitwise_and(d1, d2)
    (value, result) = cv2.threshold(result, threshold, 255, cv2.THRESH_BINARY)
    scalar = cv2.sumElems(result)
    return scalar
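# This variant condenses the motion mask into a single score: cv2.sumElems
# returns a 4-tuple whose element 0 is the sum over the binary mask. A hedged
# usage sketch; `threshold` is the module-level global the function reads, and
# MOTION_LEVEL is an illustrative trigger value.
import cv2

threshold = 25
MOTION_LEVEL = 100000

cap = cv2.VideoCapture(0)  # assumed webcam
t0, t1, t2 = (cv2.cvtColor(cap.read()[1], cv2.COLOR_BGR2GRAY) for _ in range(3))
cap.release()

score = diffImg(t0, t1, t2)[0]
if score > MOTION_LEVEL:
    print('motion detected, score = %d' % score)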
Example #16
    def edge_detect(self, im, shift=1):
        """Return image filtered for hard color transitions.

        Args:

            im - Image to detect edges.

        Keyword Args:

            shift - Distance to shift image copies before calculating absolute
            difference with original. (default: 1)

        Note that the (width, height) of the returned image will be reduced by
        (shift, shift).

        """

        x_dim, y_dim = im.shape[:2]

        # Create three cropped images to simulate a pixel shift in two
        # different directions. The directions of the shifts are orthogonal so
        # that any direction of edges is detected.
        im_crop = im[0 : x_dim - shift, 0 : y_dim - shift]
        im_shift_up = im[0 : x_dim - shift, shift:y_dim]
        im_shift_left = im[shift:x_dim, 0 : y_dim - shift]
        vertical_detect = cv2.absdiff(im_crop, im_shift_up)
        horizontal_detect = cv2.absdiff(im_crop, im_shift_left)
        return cv2.add(vertical_detect, horizontal_detect)
def foreground(bg_img, raw_img):

	#take the background and subtract somehow from the foreground
	img = raw_img*1
	raw_hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
	bg_hsv = cv2.cvtColor(bg_img,cv2.COLOR_BGR2HSV)
	hmask = cv2.absdiff(raw_hsv[:,:,0], bg_hsv[:,:,0])
	smask = cv2.absdiff(raw_hsv[:,:,1], bg_hsv[:,:,1])
	vmask = cv2.absdiff(raw_hsv[:,:,2], bg_hsv[:,:,2])
	ret,hmask_thresh = cv2.threshold(hmask,1.,1.,cv2.THRESH_BINARY)
	ret,smask_thresh = cv2.threshold(smask,1.,1.,cv2.THRESH_BINARY)
	ret, vmask_thresh = cv2.threshold(vmask,1.,1.,cv2.THRESH_BINARY)
	hsv_mask = np.multiply(hmask_thresh, smask_thresh)
	hsv_mask = np.multiply(hsv_mask, vmask_thresh)
	hsv_mask = cv2.dilate(hsv_mask,(100,100) )

	###Filter out colors with extreme values and no red for skin###
	# ret, rmask = cv2.threshold(img[:,:,2],40,1., cv2.THRESH_BINARY)
	# ret, r2mask = cv2.threshold(img[:,:,2],235.,1., cv2.THRESH_BINARY_INV)
	# rb_mask = np.multiply(rmask, r2mask)
	# img[:,:,0 ]=	np.multiply(img[:,:,0], rb_mask)
	# img[:,:,1 ]=	np.multiply(img[:,:,1], rb_mask)
	# img[:,:,2 ]=	np.multiply(img[:,:,2], rb_mask)
	# bmask = cv2.absdiff(img[:,:,0], bg_img[:,:,0])
	# gmask = cv2.absdiff(img[:,:,1], bg_img[:,:,1])
	rmask = cv2.absdiff(img[:,:,2], bg_img[:,:,2])
	ret,rmask_thresh = cv2.threshold(rmask,20.,1.,cv2.THRESH_BINARY)


	##Greyscale mask that kinda worked except for bright lighting
	raw_gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
	#raw_gray = cv2.GaussianBlur(raw_gray, (5,5), 2)

	bg_gray = cv2.cvtColor(bg_img,cv2.COLOR_BGR2GRAY)
	#bg_gray = cv2.GaussianBlur(bg_gray, (5,5), 5)

	mask = cv2.absdiff(raw_gray, bg_gray)
	ret,mask = cv2.threshold(mask,15.,1.,cv2.THRESH_BINARY)
	###

	###make changes here
	mask = mask*1.0
	mask = np.multiply(rmask_thresh, mask)
	mask = np.multiply(hsv_mask, mask)
	mask = cv2.dilate(mask,(100,100))

	for i in range(4):
		mask = cv2.erode(mask*255, (50,50))/255.

	for i in range(5):
		mask = cv2.dilate(mask*255., (50,50))/255.

	fg_img = img*1.0
	fg_img[:,:,0] = np.multiply(img[:,:,0], mask)
	fg_img[:,:,1] = np.multiply(img[:,:,1], mask)
	fg_img[:,:,2] = np.multiply(img[:,:,2], mask)

	cv2.imshow("fg_img", np.array(fg_img, dtype= "uint8"))

	return np.array(mask, dtype = "uint8")
def diffImg(t0, t1, t2):
  d1 = cv2.absdiff(t2, t1)
  d2 = cv2.absdiff(t1, t0)
  result = cv2.bitwise_and(d1, d2)

  return result
Example #19
def differential_Image(d0, d1, d2):

  diff1 = cv2.absdiff(d2, d1)

  diff2 = cv2.absdiff(d1, d0)

  return cv2.bitwise_and(diff1, diff2)
Example #20
def diffImg(t0, t1, t2):

  d1 = cv2.absdiff(t2, t1)

  d2 = cv2.absdiff(t1, t0)

  return cv2.bitwise_and(d1, d2)
def doConv(images):

        total = []    
        previous = 0
        binaries = []
        #print "Size: ", len(images)
        for i in range(len(images)):       
            img4 = cv2.cvtColor(images[i], cv2.COLOR_BGR2GRAY)    
            img4 = numpy.asarray(img4)
            if i == 0:
                previous = img4
                total = img4
            else:
                newImage = cv2.absdiff(img4, previous)
                binaries.append(newImage)              
            previous = img4
      
        for i in range(len(binaries)):
            # img4 = sg.convolve(numpy.asarray(images[i]), Ww[0], "valid")
            img4 = numpy.asarray(binaries[i])

            if i == 0:
                total = img4
            else:
                # total = total + (img4*weights[i])
                total = cv2.absdiff(img4, total)

        total = total*10
        # total = cv2.cvtColor(total, cv2.COLOR_BGR2GRAY)
        ret2, total = cv2.threshold(total, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        total = cv2.medianBlur(total, 15)
        return total
def binary_generator(training_element):
	create_folder("binaryfeatures")


	if (training_element == -1):
	    for j in xrange (0,folders):
	        create_folder("binaryfeatures/"+list_folder[j])
	        list_imgs=os.listdir("rgbfeatures/"+list_folder[j])
	        for i in xrange(0,(len(list_imgs)-1)):
	            img1 = cv2.imread("rgbfeatures/"+list_folder[j]+"/001.jpeg",0)
	            bgsub = img1
	            img2 = cv2.imread("rgbfeatures/"+list_folder[j]+"/"+list_imgs[i+1],0)
	            cv2.absdiff(img2, img1, bgsub)
	            bgsub_blur2 = cv2.GaussianBlur(bgsub,(5,5),0)
	            (thresh, im_bw) = cv2.threshold(bgsub_blur2, 55, 255, cv2.THRESH_BINARY)
	            a='{0:03}'.format(i+1)
	            cv2.imwrite("binaryfeatures/"+list_folder[j]+"/bw_"+a+".png",im_bw)

	else:
		list_imgs=os.listdir("rgbfeatures/"+list_folder[training_element])
		create_folder("binaryfeatures/"+list_folder[training_element])
		for i in xrange(0,(len(list_imgs)-1)):
		    img1 = cv2.imread("rgbfeatures/"+list_folder[training_element]+"/001.jpeg",0)
		    bgsub = img1
		    img2 = cv2.imread("rgbfeatures/"+list_folder[training_element]+"/"+list_imgs[i+1],0)
		    cv2.absdiff(img2, img1, bgsub)
		    bgsub_blur2 = cv2.GaussianBlur(bgsub,(5,5),0)
		    (thresh, im_bw) = cv2.threshold(bgsub_blur2, 55, 255, cv2.THRESH_BINARY)
		    a='{0:03}'.format(i+1)
		    cv2.imwrite("binaryfeatures/"+list_folder[training_element]+"/bw_"+a+".png",im_bw)
Example #23
    def update_roi(self, image, bInvertColor):
        self.image = image
        self.shape = image.shape
        
        # Extract the ROI images.
        if (self.roi is not None):
            self.imgRoi = copy.deepcopy(image[self.roi[1]:self.roi[3], self.roi[0]:self.roi[2]])

            # Color inversion.
            if (bInvertColor):
                self.imgRoiFg = 255-self.imgRoi
            else:
                self.imgRoiFg = self.imgRoi

            # Background Subtraction.
            if (self.params['gui'][self.name]['subtract_bg']):
                with self.lockBackground:
                    if (self.imgRoiBackground is not None):
                        if (self.imgRoiBackground.shape==self.imgRoiFg.shape):
                            if (bInvertColor):
                                self.imgRoiFg = cv2.absdiff(self.imgRoiFg, 255-self.imgRoiBackground.astype(np.uint8))
                            else:
                                self.imgRoiFg = cv2.absdiff(self.imgRoiFg, self.imgRoiBackground.astype(np.uint8))
                        
                
            # Equalize the brightness/contrast.
            if (self.bEqualizeHist):
                if (self.imgRoiFg is not None):
                    self.imgRoiFg -= np.min(self.imgRoiFg)
                    max2 = np.max([1.0, np.max(self.imgRoiFg)])
    
                    self.imgRoiFg *= (255.0/float(max2))
Example #24
        def getCandidatesFromPPFrame(self,ppframe):
                hsvSplit = cv2.split(ppframe)

                #gray = cv2.bitwise_and(gray,mask)
                # compute the absolute difference between the current frame and

                frameDelta = cv2.absdiff(hsvSplit[0],self.background[0]) + cv2.absdiff(hsvSplit[1],self.background[1]) + cv2.absdiff(hsvSplit[2],self.background[2])

                #thresh = cv2.threshold(frameDelta, 160, 0, cv2.THRESH_TOZERO_INV)[1]
                thresh = cv2.threshold(frameDelta, self.threshold, 255, cv2.THRESH_BINARY)[1]


                # dilate the thresholded image to fill in holes, then find contours
                # on thresholded image
                thresh = cv2.dilate(thresh, None, iterations=2)

                cv2.imshow("Thresh",thresh)

                (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
                cnts = [c for c in cnts if cv2.contourArea(c) > args["min_area"]]

                cnts = sorted(cnts,key=lambda c: -cv2.contourArea(c))

                def getCenter(c):
                        (x, y, w, h) = cv2.boundingRect(c)
                        return((x+w/2,y+h/2))

                coordinates = [ getCenter(c) for c in cnts ]
                #cv2.imshow("Thresh", thresh)
                #cv2.imshow("Frame Delta", frameDelta)


                return(cnts,coordinates)
Example #25
def diffImg(t0, t1, t2):
    '''Function to Obtain the difference between
    Images
    '''
    d1 = cv2.absdiff(t2, t1)
    d2 = cv2.absdiff(t1, t0)
    return cv2.bitwise_and(d1, d2)
Example #26
    def _frame_diff(self, old_frame, new_frame):
        if len(self.humans) == 0:
            self.humans.append(self._find_human(old_frame))
        self.humans.append(self._find_human(new_frame))

        human_old_frame = self.humans[-2]
        human_new_frame = self.humans[-1]

        old_frame_copy = old_frame.copy()
        new_frame_copy = new_frame.copy()

        human = self._Human.union(human_old_frame, human_new_frame)

        x_min, x_max, w = human.x_min, human.x_max, human.w
        num_of_pixels = self.shape.height * self.shape.width
        if w != 0:
            x_l = int(x_min - w * 0.15 if x_min - w * 0.15 >= 0 else 0)
            x_r = int(x_max + w * 0.15 if x_max + w * 0.15 < self.shape.width else self.shape.width)
            for frame in [old_frame_copy, new_frame_copy]:
                cv2.rectangle(frame, (x_l, 0), (x_r, self.shape.height), (0, 0, 0), cv2.FILLED)

            abs_diff = cv2.absdiff(old_frame_copy, new_frame_copy)
            num_of_pixels -= self.shape.height * (x_r - x_l)
        else:
            abs_diff = cv2.absdiff(old_frame, new_frame)

        return float(abs_diff.sum() / (num_of_pixels or 1))
Example #27
	def diffImg(self, t0, t1, t2):
		"""Uses absdiff to determine the difference between 2 images"""
		# Gets the difference between 2 images
		d1 = cv2.absdiff(t2, t1)
		d2 = cv2.absdiff(t1, t0)
		# Returns the bitwise_and between d1 and d2
		return cv2.bitwise_and(d1, d2)
    def diffImg3(self, img1, img2, img3):
        gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
        gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
        gray3 = cv2.cvtColor(img3, cv2.COLOR_BGR2GRAY)
        diff1 = cv2.absdiff(gray1, gray2)
        diff2 = cv2.absdiff(gray2, gray3)
        return cv2.bitwise_and(diff1, diff2)
Example #29
def detect_motion(t0, t1, t2):
    """
    Calcula la diferencia absoluta entre dos pares de imagenes y
    retorna la matriz resultante
    """
    d1 = cv2.absdiff(t2, t1)
    d2 = cv2.absdiff(t1, t0)
    return cv2.bitwise_and(d1, d2)
Example #30
    def filter(self, cv_image):
        b, g, r = cv2.split(cv_image.image)
        is_grey = (cv2.absdiff(b, g).sum() == 0
                   and cv2.absdiff(b, r).sum() == 0)
        return is_grey
Example #31
def main():
    # Load the parameters for the motion detection
    try:
        with open(PATH_MD_PARAMS) as f:
            data = csv.DictReader(f)
            for line in data:
                params = line  # Header is read too
    except FileNotFoundError:
        print("[ERROR] Motion detection parameters file not found.")
        raise
    params['iou'] = float(params['iou']) / 2
    params['gaussWindow'] = int(params['gaussWindow'])
    params['residualConnections'] = int(params['residualConnections'])
    params['dilationIterations'] = int(params['dilationIterations'])
    params['sigma'] = float(params['sigma'])
    print("[INFO] Motion detection params:\n", params)

    # Load the CNN
    try:
        loaded_model = transfer_learning.load_model(PATH_ARCHITECTURE,
                                                    PATH_WEIGHTS)
    except FileNotFoundError:
        print("[ERROR] CNN model|weights file not found.")
        raise

    # Run through the video
    first_bbox = min(bbox_heli_ground_truth.keys())
    last_bbox = max(bbox_heli_ground_truth.keys())
    print("[INFO] Using bbox frames {} to {}".format(first_bbox, last_bbox))

    # Single core example
    timing = {
        'Read frame': 0,
        'Convert to grayscale': 0,
        'Stabilize': 0,
        'Double Gauss': 0,
        'Abs diff': [],
        'Thresholding': 0,
        'Dilation': 0,
        'Count boxes': 0,
        'Finalize': 0
    }
    nb_bbox = []  # Stores bbox data for a sim
    # Get ready to store residualConnections frames over and over

    # Create Tracker
    Tracker = OPENCV_OBJECT_TRACKERS["csrt"]()
    flag_tracker_active = False
    #fps = FPS().start()
    # Create an extractor to get the contours later on
    extractor = extract.extractor()
    # Create a deque for frame accumulation
    previous_gray_frame = collections.deque(
        maxlen=params['residualConnections'])
    # Track the results
    Y_prediction = []
    Y_test = []
    # Count global number of bboxes
    counter_bbox = 0
    # Start re-encoding
    if REENCODE:
        reencoded_video = cv2.VideoWriter(PATH_REENCODED_VIDEO, FOURCC, 25,
                                          (1920, 1080))
    t0 = time.perf_counter()  # Mostly used for first frame display

    for frame_number in range(NB_FRAMES):

        current_frame = VIDEO_STREAM.read()[1]

        t1 = time.perf_counter()

        # Check the status of the tracker
        #print("[INFO] Frame {}\tTracker status: {}".format(frame_number, flag_tracker_active))
        flag_tracker_active = False
        if flag_tracker_active:  # The tracker is ON
            flag_success, bbox_roi = Tracker.update(current_frame)
            bbox_roi = [int(value) for value in bbox_roi
                        ]  # The update returns floating point values...
            if frame_number % CNN_CHECK_STRIDE == 0:
                print("[INFO] Verifying tracker at frame", frame_number)
                # Time to verify that the Tracker is still locked on the helico
                prediction, crop = infer_bbox(loaded_model, current_frame,
                                              bbox_roi, METHOD)
                if prediction == 1:  # All good, keep going
                    flag_quit_program = display_frame(current_frame, bbox_roi,
                                                      frame_number,
                                                      flag_tracker_active)
                    #fps.update()
                    if flag_quit_program:
                        return
                    continue
                elif prediction == 0:  # There is actually no helico in the tracked frame!
                    Tracker = OPENCV_OBJECT_TRACKERS["csrt"]()  # Reset tracker
                    flag_tracker_active = False
                    previous_gray_frame = collections.deque(
                        maxlen=params['residualConnections'])
                    pass  # Engage the motion detection algo below
                else:
                    print(
                        "[ERROR] The model is supposed to be a binary classifier"
                    )
                    raise ValueError("expected a binary classifier")
            else:  # Between checks from the CNN, go to the next frame
                flag_quit_program = display_frame(current_frame, bbox_roi,
                                                  frame_number,
                                                  flag_tracker_active)
                #fps.update()
                if flag_quit_program:
                    return
                continue
        else:  # The tracker is OFF
            #if frame_number%CNN_CHECK_STRIDE:
            #continue
            # Engage the motion detection algo below
            pass

        # Switch the gray space
        current_gray_frame = cv2.cvtColor(current_frame.copy(),
                                          cv2.COLOR_RGB2GRAY)
        #print(len(previous_gray_frame))
        # Populate the deque with the params['residualConnections'] next gray frames
        if len(previous_gray_frame) < params['residualConnections']:
            previous_gray_frame.append(current_gray_frame)
            continue
        t4 = time.perf_counter()

        # Gaussian blur
        current_gauss_frame, previous_gauss_frame = gaussian_blur(
            [current_gray_frame, previous_gray_frame[0]],
            params['gaussWindow'])

        t5 = time.perf_counter()

        # Differentiation
        diff_frame = cv2.absdiff(current_gauss_frame, previous_gauss_frame)
        t7 = time.perf_counter()

        # Canny
        canny_frame = canny_contours(diff_frame, params['sigma'])
        t7 = time.perf_counter()

        # Morphological transformations
        morph_frame = cv2.dilate(canny_frame,
                                 None,
                                 iterations=params['dilationIterations'])
        #diff_frame = cv2.morphologyEx(diff_frame, cv2.MORPH_CLOSE, None, iterations=params['dilationIterations'])

        # Get the contours sorted by area (largest first)
        contours = extractor.image_contour(morph_frame,
                                           sorting='area',
                                           min_area=MIN_AREA)
        t8 = time.perf_counter()

        # Process the bbox that came out of the contours
        large_box = 0
        counter_bbox_heli = 0
        if first_bbox <= frame_number <= last_bbox:
            (x_gt, y_gt, w_gt,
             h_gt) = bbox_heli_ground_truth[frame_number]  # Ground Truth data
        else:
            (x_gt, y_gt, w_gt, h_gt) = (1919, 1079, 1, 1)
        counter_failed_detection = 0
        (x, y, w, h) = (0, 0, 0, 0)
        success = []
        for contour in contours:
            c = contour[0]
            # A. Filter out useless BBs
            # 1. if the contour is too small or too large, ignore it
            if cv2.contourArea(c) < MIN_AREA:
                continue
            if cv2.contourArea(c) > MAX_AREA:
                continue
            # compute the bounding box for the contour, draw it on the current_frame,
            # and update the text
            (x, y, w, h) = cv2.boundingRect(c)

            # 2. Box partially out of the frame
            if x < 0 or x + w > FRAME_WIDTH or y < 0 or y + h > FRAME_HEIGHT:
                continue
            # 3. Box center is not in the PADDING area next to the edges of the frame
            if not (PADDING < x + w // 2 < FRAME_WIDTH - PADDING
                    and PADDING < y + h // 2 < FRAME_HEIGHT - PADDING):
                continue

            # B. Classify BBs - a large_box is a potential bbox_heli_ground_truth
            large_box += 1
            counter_bbox += 1  # global counter

            # Infer bbox
            prediction, crop = infer_bbox(loaded_model, current_frame,
                                          (x, y, w, h), METHOD)

            # Determine the label for this box based on the IoU with the ground truth one.
            converted_bbox = bbox.xywh_to_x1y1x2y2((x, y, w, h))
            converted_gt_bbox = bbox.xywh_to_x1y1x2y2((x_gt, y_gt, w_gt, h_gt))
            label = 1 if bbox.intersection_over_union(
                converted_bbox, converted_gt_bbox) >= params['iou'] else 0

            # Append both results to their respective lists
            Y_prediction.append(prediction)
            Y_test.append(label)

            #prediction = 0
            name = 'Helico' if prediction else 'Motion'
            color = COLOR['RED'] if prediction else COLOR['BLUE']
            cv2.putText(current_frame, name, (x, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
            cv2.rectangle(current_frame, (x, y), (x + w, y + h), color, 2)

        # Update the deque
        previous_gray_frame.append(current_gray_frame)
        # Add the total number of bboxes detected so far
        text_bboxes = 'Number of detected bboxes: {}'.format(counter_bbox)
        cv2.putText(current_frame, text_bboxes, (10, 80),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLOR['WHITE'], 2)
        # Add the current accuracy for the CNN
        #accuracy = 1-np.sum(np.abs(np.array(Y_prediction)-np.array(Y_test)))/len(Y_test)
        if len(Y_test):
            conf_mx = confusion_matrix(Y_test, Y_prediction)
            if conf_mx.shape == (2, 2):
                accuracy = (conf_mx[0, 0] + conf_mx[1, 1]) / np.sum(conf_mx)
            else:
                conf_mx = np.zeros((2, 2))
                accuracy = 1
        else:
            conf_mx = np.zeros((2, 2))
            accuracy = 1
        text_accuracy = 'Cumulative accuracy: {:.1f} % TN: {} TP: {} FN: {} FP: {}'.format(
            100 * accuracy, conf_mx[0, 0], conf_mx[1, 1], conf_mx[1, 0],
            conf_mx[0, 1])
        cv2.putText(current_frame, text_accuracy, (10, 60),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLOR['WHITE'], 2)

        # Update tracker state
        tracker_state = 'ON' if flag_tracker_active else 'OFF'
        text_tracker = 'Tracker: {}'.format(tracker_state)
        cv2.putText(current_frame, text_tracker, (10, 40),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLOR['WHITE'], 2)
        # Update FPS and frame count
        fps = 1 / (time.perf_counter() - t0)
        text_count = 'FPS: {:.1f} Frame number: {}'.format(fps, frame_number)
        cv2.putText(current_frame, text_count, (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLOR['WHITE'], 2)
        t0 = time.perf_counter()
        # Re-encode the frame
        if REENCODE:
            reencoded_video.write(current_frame)

        # Display frame
        cv2.imshow(VIDEO_STREAM_PATH, imutils.resize(current_frame,
                                                     width=1000))
        #cv2.imshow("Diff_frame", diff_frame)
        #cv2.imshow("Canny frame", canny_frame)

        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            #print(Y_prediction)
            #print(Y_test)
            if REENCODE:
                reencoded_video.release()
            return
        """
            print("[INFO] Inferred bbox: ", prediction)
            if prediction == 1:  # First helico detected!
                #Tracker.init(current_frame, (x, y, w, h))
                #flag_tracker_active = True
                #success.append([prediction, crop])
                
                #break
                #continue
            elif prediction == 0:
                counter_failed_detection += 1
                if counter_failed_detection >= MAX_FAILED_INFERENCES:
                    #break  # Not time for another attempt. Improve the motion detection.
                    #continue
            else:
                print("[ERROR] The model is supposed to be a binary classifier")

        print("[INFO] Length success:", len(success))
        if len(success):
            fig, ax = plt.subplots(1, max(2, len(success)))
            print(len(success))
            for index, res in enumerate(success):
                print(index)
                ax[index].imshow(cv2.cvtColor(res[1], cv2.COLOR_BGR2RGB))
                ax[index].set_title("Heli")
                ax[index].axis('off')
            plt.show()
        """
        #fps.update()
        """[Is display_frame still useful?]
        # Display the result from motion detection
        #print("[INFO] Display frame")
        #flag_quit_program = display_frame(current_frame, (x, y, w, h), frame_number, flag_tracker_active)
        
        print()
        
        if flag_quit_program:
            return
        """
        """[For later]
        # Classify bboxes based on their IOU with ground truth
        converted_current_bbox = bbox.xywh_to_x1y1x2y2(bbox_crop)
        converted_ground_truth_bbox = bbox.xywh_to_x1y1x2y2((x_gt, y_gt, w_gt, h_gt))
        if bbox.intersection_over_union(converted_current_bbox, converted_ground_truth_bbox) >= IOU:
            counter_bbox_heli += 1
        """
    #fps.stop()
    conf_mx = confusion_matrix(Y_test, Y_prediction)
    print("[INFO] Confusion Matrix:\n", conf_mx)
    #print("[INFO] Final accuracy: {:.1f}".format(100*accuracy))
    #plt.figure()
    plt.matshow(conf_mx, cmap=plt.cm.gray)
    plt.title("Confusion matrix on {} | Accuracy: {:.1f}%".format(
        FOLDER_NAME, 100 * accuracy))
    plt.savefig("Confusion_matrix_" + FOLDER_NAME, tight_layout=False)
    plt.show()
    if REENCODE:
        reencoded_video.release()
def diffImg(t0, t1, t2):  # Function to calculate difference between images.
    d1 = cv2.absdiff(t2, t1)
    d2 = cv2.absdiff(t1, t0)
    return cv2.bitwise_and(d1, d2)
def Begin(Type):
    avg = None
    count = 0
    flag = 0
    cnt = 0
    first = None
    detectflag = None
    while True:
        if Type == 0:
            frame = vs.read()
        elif Type == 1:
            if vs.more() == False:
                break
            frame = vs.read()
        if first is None:
            frame = imutils.resize(frame, width=500)
            r = selectRoi(frame)
            frameroi = frame[int(r[1]):int(r[1] + r[3]),
                             int(r[0]):int(r[0] + r[2])]
            grayf = cv2.cvtColor(frameroi, cv2.COLOR_BGR2GRAY)
            grayfb = cv2.GaussianBlur(grayf, (21, 21), 0)
            first = 1
        text = "Unoccupied"
        frame = imutils.resize(frame, width=500)
        frameroi = frame[int(r[1]):int(r[1] + r[3]),
                         int(r[0]):int(r[0] + r[2])]
        gray = cv2.cvtColor(frameroi, cv2.COLOR_BGR2GRAY)
        grayb = cv2.GaussianBlur(gray, (5, 5), 0)
        if avg is None:
            print("[INFO] starting background model...")
            avg = grayb.copy().astype("float")
            continue
        cv2.accumulateWeighted(grayb, avg, 0.6)
        frameDelta = cv2.absdiff(grayb, cv2.convertScaleAbs(avg))
        thresh = cv2.threshold(frameDelta, 10, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        for c in cnts:
            (x, y, w, h) = cv2.boundingRect(c)
            if w < 20 or h < 20:
                continue
            cv2.rectangle(frameroi, (x, y), (x + w, y + h), (0, 255, 0), 2)
            text = "Occupied"

        if len(cnts) != 0:
            cnt += 1
            flag = 1
            count = 0

        if len(cnts) == 0:
            count += 1
        if count >= 100:
            if flag == 0:
                grayf = gray
                cnt = 0
            else:
                flag = 0
                if cnt >= 50:
                    grayfb = cv2.GaussianBlur(grayf, (21, 21), 0)
                    grayb = cv2.GaussianBlur(gray, (21, 21), 0)
                    frameD = cv2.absdiff(grayb, grayfb)
                    threshd = cv2.threshold(frameD, 35, 255,
                                            cv2.THRESH_BINARY)[1]
                    threshd = cv2.dilate(threshd, None, iterations=2)

                    #cv2.imshow("thred",threshd)
                    cntsd = cv2.findContours(threshd.copy(), cv2.RETR_EXTERNAL,
                                             cv2.CHAIN_APPROX_SIMPLE)
                    cntsd = cntsd[0] if imutils.is_cv2() else cntsd[1]
                    fram = frameroi.copy()
                    #cv2.drawContours(fram, cntsd, -1, (0,255,0), 3)
                    for c in cntsd:
                        (x, y, w, h) = cv2.boundingRect(c)
                        area = w * h
                        if area < 81:
                            continue

                        cv2.rectangle(fram, (x, y), (x + w, y + h),
                                      (0, 255, 0), 2)
                        image1 = grayf[int(y):int(y + h), int(x):int(x + w)]
                        image2 = gray[int(y):int(y + h), int(x):int(x + w)]
                        s = ssim(image1, image2)
                        if s < 0.4:
                            if detectflag is None:
                                print("object is detected")
                                detectflag = 1
                            cv2.rectangle(frameroi, (x, y), (x + w, y + h),
                                          (0, 255, 0), 2)
                            continue
                        if area > 2500:

                            c = compare(image1, image2)
                            if c >= 3:
                                if detectflag is None:
                                    print("object is detected")
                                    detectflag = 1
                                cv2.rectangle(frameroi, (x, y), (x + w, y + h),
                                              (0, 255, 0), 2)
                        #cv2.imshow("Security", frame
                    detectflag = None

                    cv2.imshow("Security", frameroi)
                    #cv2.imshow("Sec", fram)
                    cv2.waitKey(1)

        cv2.imshow("Security Feed", frameroi)
        cv2.imshow("thresh", thresh)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
while True:
    #     a += 1
    check, frame = video.read()
    status = 0
    #     print(frame)

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    if first_frame is None:
        first_frame = gray
        continue
    status = 1

    delta_frame = cv2.absdiff(first_frame, gray)
    thresh_delta = cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1]
    thresh_delta = cv2.dilate(thresh_delta, None, iterations=0)

    (_, cnts, _) = cv2.findContours(thresh_delta.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)

    for contour in cnts:
        if cv2.contourArea(contour) < 1000:
            continue
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
    status_list.append(status)

    status_list = status_list[-2:]
    def process_frame(self, frame_num, frame_img):
        # type: (int, numpy.ndarray) -> List[int]
        """Process the next frame.

        Returns:
            List of frame numbers where a scene cut was detected.
        """
        # Similar to ThresholdDetector, but using the HSV colour space DIFFERENCE instead
        # of single-frame RGB/grayscale intensity (thus cannot detect slow fades with this method).
        cut_list = []
        metric_keys = self._metric_keys
        _unused = ''

        if self.last_frame is not None:
            # Change in average of HSV (hsv), (h)ue only, (s)aturation only, (l)uminance only.
            delta_hsv_avg, delta_h, delta_s, delta_v = 0.0, 0.0, 0.0, 0.0

            if (self.stats_manager is not None
                    and self.stats_manager.metrics_exist(
                        frame_num, metric_keys)):
                delta_hsv_avg, delta_h, delta_s, delta_v = self.stats_manager.get_metrics(
                    frame_num, metric_keys)

            else:
                curr_hsv = cv2.cvtColor(frame_img, cv2.COLOR_BGR2HSV)
                curr_hsv = curr_hsv.astype(numpy.int16)
                if self.last_hsv is None:
                    last_hsv = cv2.cvtColor(self.last_frame, cv2.COLOR_BGR2HSV)
                    last_hsv = last_hsv.astype(numpy.int16)
                else:
                    last_hsv = self.last_hsv
                # Image math is faster with cv2
                absdiff = cv2.absdiff(curr_hsv, last_hsv)
                delta_h, delta_s, delta_v = cv2.mean(absdiff)[:3]
                delta_hsv_avg = cv2.mean(
                    numpy.array([delta_h, delta_s, delta_v]))[0]

                if self.stats_manager is not None:
                    self.stats_manager.set_metrics(
                        frame_num, {
                            metric_keys[0]: delta_hsv_avg,
                            metric_keys[1]: delta_h,
                            metric_keys[2]: delta_s,
                            metric_keys[3]: delta_v
                        })

                self.last_hsv = curr_hsv

            if delta_hsv_avg >= self.threshold:
                if self.last_scene_cut is None or (
                    (frame_num - self.last_scene_cut) >= self.min_scene_len):
                    cut_list.append(frame_num)
                    self.last_scene_cut = frame_num

            if self.last_frame is not None and self.last_frame is not _unused:
                del self.last_frame

        # If we have the next frame computed, don't copy the current frame
        # into last_frame since we won't use it on the next call anyways.
        if (self.stats_manager is not None and
                self.stats_manager.metrics_exist(frame_num + 1, metric_keys)):
            self.last_frame = _unused
        else:
            self.last_frame = frame_img.copy()

        return cut_list
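# Not from the original source: the core metric above can be reproduced
# standalone. A sketch (illustrative names, not the original class API) that
# compares two BGR frames by their mean per-channel HSV difference.
import cv2
import numpy

def mean_hsv_delta(frame_a, frame_b):
    # Average absolute HSV difference between two BGR frames.
    hsv_a = cv2.cvtColor(frame_a, cv2.COLOR_BGR2HSV).astype(numpy.int16)
    hsv_b = cv2.cvtColor(frame_b, cv2.COLOR_BGR2HSV).astype(numpy.int16)
    delta_h, delta_s, delta_v = cv2.mean(cv2.absdiff(hsv_a, hsv_b))[:3]
    return (delta_h + delta_s + delta_v) / 3.0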
	frame = cv2.resize(frame, dim)
	gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
	# gray_frame = np.dstack([gray_frame, gray_frame, gray_frame])
	gray_frame = cv2.GaussianBlur(gray_frame, (21, 21), 0)

	# if first_frame is None:
	# 	first_frame = gray_frame
	# 	continue

	if avg is None:
		avg = gray_frame.copy().astype("float")
		continue

	cv2.accumulateWeighted(gray_frame, avg, 0.5)
	frame_delta = cv2.absdiff(gray_frame, cv2.convertScaleAbs(avg))
	# frame_delta = cv2.absdiff(first_frame, gray_frame)
	thresh = cv2.threshold(frame_delta, 20, 255, cv2.THRESH_BINARY)[1]
	thresh = cv2.dilate(thresh, None, iterations=5)

	cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
	cnts = imutils.grab_contours(cnts)

	for c in cnts:
		if cv2.contourArea(c) < args["min_area"]:
			continue

		(x, y, w, h) = cv2.boundingRect(c)
		cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
		text = "Occupied"
Example #37
def DetectObjectFromImage(beforeImage, afterImage, beforeGrayImage,
                          afterGrayImage):

    resizeRate = GetContour.SquareDetectAndReturnRateAsSquare(beforeGrayImage)
    beforeImage = CustomOpenCV.ResizeImageAsRate(beforeImage, resizeRate)
    beforeGrayImage = CustomOpenCV.ResizeImageAsRate(beforeGrayImage,
                                                     resizeRate)
    afterImage = CustomOpenCV.ResizeImageAsRate(afterImage, resizeRate)
    afterGrayImage = CustomOpenCV.ResizeImageAsRate(afterGrayImage, resizeRate)

    #squareContourData = DetectBackgroundSquare.DetectBackgroundSquareFromImage(beforeImage) # function that finds the 4 corner points via fluorescent-colour detection
    # On macOS this function does not work and falls into a loop.
    squareContourData = DetectBlackBoardContourFromOriginImage(beforeGrayImage)

    # Perspective-correct the large, warped quadrilateral into a square
    perspectiveUpdatedBeforeImage = ImageMatrixMove.ImageMatrixMove(
        beforeImage, squareContourData)
    perspectiveUpdatedAfterImage = ImageMatrixMove.ImageMatrixMove(
        afterImage, squareContourData)

    perspectiveUpdatedBeforeImage = CustomOpenCV.ResizeImageAsWidth(
        perspectiveUpdatedBeforeImage, DefineManager.IMAGE_WIDTH)
    perspectiveUpdatedAfterImage = CustomOpenCV.ResizeImageAsWidth(
        perspectiveUpdatedAfterImage, DefineManager.IMAGE_WIDTH)
    # Resize image as shape [ rateHeight, DefineManager.IMAGE_WIDTH ]

    #CustomOpenCV.ShowImagesWithName([perspectiveUpdatedBeforeImage, perspectiveUpdatedAfterImage],
    #                                ["perspectiveUpdatedBeforeImage", "perspectiveUpdatedAfterImage"])

    perspectiveUpdatedBeforeGrayImage = cv2.cvtColor(
        perspectiveUpdatedBeforeImage, cv2.COLOR_BGR2GRAY)
    perspectiveUpdatedAfterGrayImage = cv2.cvtColor(
        perspectiveUpdatedAfterImage, cv2.COLOR_BGR2GRAY)

    morphologyKernel = np.ones(
        (Setting.DefineManager.MORPHOLOGY_MASK_SIZE + 1,
         Setting.DefineManager.MORPHOLOGY_MASK_SIZE + 1), np.uint8)
    perspectiveUpdatedBeforeMorphologyGrayImage = cv2.morphologyEx(
        perspectiveUpdatedBeforeGrayImage, cv2.MORPH_OPEN, morphologyKernel)
    perspectiveUpdatedAfterMorphologyGrayImage = cv2.morphologyEx(
        perspectiveUpdatedAfterGrayImage, cv2.MORPH_OPEN, morphologyKernel)
    # Reduce image noise

    beforeThresholdedBlackBoardImage = cv2.adaptiveThreshold(
        perspectiveUpdatedBeforeMorphologyGrayImage,
        Setting.DefineManager.SET_IMAGE_WHITE_COLOR,
        cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,
        Setting.DefineManager.NEIGHBORHOOD_MASK_SIZE, 10)
    afterThresholdedBlackBoardImage = cv2.adaptiveThreshold(
        perspectiveUpdatedAfterMorphologyGrayImage,
        Setting.DefineManager.SET_IMAGE_WHITE_COLOR,
        cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,
        Setting.DefineManager.NEIGHBORHOOD_MASK_SIZE, 10)
    # Adaptive Threshold Image
    #CustomOpenCV.ShowImagesWithName([beforeThresholdedBlackBoardImage, afterThresholdedBlackBoardImage], ['beforeThresholdedBlackBoardImage', 'afterThresholdedBlackBoardImage'])

    differenceBasedOnThreshImage = cv2.absdiff(
        beforeThresholdedBlackBoardImage, afterThresholdedBlackBoardImage)
    differenceBasedOnThreshImage[
        differenceBasedOnThreshImage > Setting.DefineManager.
        EACH_IMAGE_DIFFERENCE_THRESHOLD] = Setting.DefineManager.SET_IMAGE_WHITE_COLOR
    # Detect each image difference from Threshold Image

    #CustomOpenCV.ShowImagesWithName([differenceBasedOnThreshImage], ["differenceBasedOnThreshImage"])
    objectFoundedImage = GetContour.GetObjectImage(
        perspectiveUpdatedBeforeImage, perspectiveUpdatedAfterImage)

    humanDetectedContour, contourLineDrawImage = GetContour.GetContour(
        objectFoundedImage, perspectiveUpdatedAfterImage)
    faceMinY, faceMaxY = GetContour.DetectFaceAndGetY(
        perspectiveUpdatedAfterImage)
    navelPoint, faceRate, maxY, minY = GetContour.FindNavel(
        humanDetectedContour, faceMaxY, contourLineDrawImage)
    height = maxY - minY
    importantPoint = GetContour.AngleAsDealWithPointFromContours(
        humanDetectedContour, contourLineDrawImage)

    beforeDrawImage = np.copy(perspectiveUpdatedBeforeImage)
    afterDrawImage = np.copy(perspectiveUpdatedAfterImage)
    functionParameter = []
    for index in range(len(importantPoint)):
        xArray = []
        yArray = []
        for point in importantPoint[index]:
            x, y = point.ravel()
            xArray.append(x)
            yArray.append(y)
        xArray = np.asarray(xArray)
        yArray = np.asarray(yArray)
        if xArray.shape[0] > 0:
            # ax + b = y (obtains a and b)
            functionCharacteristic = sp.polyfit(
                xArray, yArray, DefineManager.FUNCTION_DIMENSION)
            functionParameter.append(functionCharacteristic)
            yRegressionArray = sp.polyval(functionCharacteristic, xArray)
            err = np.sqrt(
                sum((yArray - yRegressionArray)**2) / yArray.shape[0])
            pointA, pointB = GetContour.GetStartAndEndPointsFromLine(
                functionCharacteristic, xArray)

            cv2.line(beforeDrawImage, pointA, pointB,
                     DefineManager.RGB_COLOR_GREEN, 1)
            cv2.line(afterDrawImage, pointA, pointB,
                     DefineManager.RGB_COLOR_GREEN, 1)

    CustomOpenCV.ShowImagesWithName([beforeDrawImage, afterDrawImage])

    return [
        perspectiveUpdatedBeforeImage, perspectiveUpdatedAfterImage, height,
        navelPoint, humanDetectedContour, functionParameter, beforeDrawImage,
        faceRate
    ]
Example #38
        graybg = cv2.cvtColor(bg, cv2.COLOR_BGR2GRAY)
        graybg_sink = cv2.cvtColor(bg_sink, cv2.COLOR_BGR2GRAY)
        # Step 2 : medianBlur
        median = cv2.medianBlur(gray, 7)
        median_sink = cv2.medianBlur(gray_sink, 7)
        medianbg = cv2.medianBlur(graybg, 7)
        medianbg_sink = cv2.medianBlur(graybg_sink, 7)
        # Step 3 : find lastframe
        if i % 6 == 0:
            lastframe = median
            lastframe_sink = median_sink
            #print "lastframe dealed"
            raw.truncate(0)
            continue
        # Step 4 : absolute diff between lastframe and current frame
        deltab = cv2.absdiff(medianbg, median)
        delta = cv2.absdiff(lastframe, median)
        deltab_sink = cv2.absdiff(medianbg_sink, median_sink)
        delta_sink = cv2.absdiff(lastframe_sink, median_sink)
        # 5 sets of accumulation between frames
        if i % 6 == 1:
            accu_img = delta
            accu_imgb = deltab
            accu_img_sink = delta_sink
            accu_imgb_sink = deltab_sink
        elif i % 6 == 2:
            accu_img = cv2.addWeighted(delta, 0.5, accu_img, 0.5, 0)
            accu_imgb = cv2.addWeighted(deltab, 0.5, accu_imgb, 0.5, 0)
            accu_img_sink = cv2.addWeighted(delta_sink, 0.5, accu_img_sink,
                                            0.5, 0)
            accu_imgb_sink = cv2.addWeighted(deltab_sink, 0.5, accu_imgb_sink,
                                             0.5, 0)
    def detectWithCam(self):
        self.logger.debug("Starting to detect Motion")
        #variables for TensorFlow human detection    
        model_path = os.path.join(self.TOPDIR, "client/objectDetect/ssd_mobilenet_v1_coco_2017_11_17/frozen_inference_graph.pb")
        detector = detectorAPI(self.ENVIRON, path_to_ckpt=model_path)

        # define feed from camera
        camera = cv2.VideoCapture(0)
        time.sleep(1)
        # initialize variables used by the motion sensing
        firstFrame = None
        lastAlert = datetime.datetime.today()
        frames = 0

        # loop over the frames of the video feed and detect motion
        while True:
            # if we are busy processing a job then skip motion until we are done
            #if self.ENVIRON["listen"] == False:
            if busyCheck(self.ENVIRON, self.logger) == True:
                continue
                
            # grab the current frame and initialize the occupied/unoccupied text
            self.logger.debug("Getting another frame. ENVIRON listen = %s" % self.ENVIRON["listen"])
            (grabbed, frame) = camera.read()
            frames += 1

            # resize the frame, convert it to grayscale, and blur it
            frame = imutils.resize(frame, width=500)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (21, 21), 0)

            # if the first frame is None, initialize it
            if firstFrame is None:
                firstFrame = gray
                continue

            # compute the absolute difference between the current frame and first frame
            frameDelta = cv2.absdiff(firstFrame, gray)
            thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
            # Update the reference frame
            firstFrame = gray
            # dilate the thresholded image to fill in holes, then find contours on thresholded image
            thresh = cv2.dilate(thresh, None, iterations=2)
            (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

            # loop over the contours
            for c in cnts:
                # if the contour is too small, ignore it
                if cv2.contourArea(c) < self.min_area:
                    continue
                #motion detected, see if a person was detected by objectDetect
                #------------------------------------------------------------
                objDict = detector.objectCount(frame)
                if 'person' in objDict:
                    lastAlert = self.detectionEvent(lastAlert, camera)

            # check the ENVIRON when frame count reaches check point
            if frames > self.framesCheck:
                self.logger.debug("Checking to see if we should stop detecting motion")
                frames = 0
                if not self.ENVIRON["motion"]:
                    self.logger.debug("Time to stop detecting motion")
                    # cleanup the camera quit function
                    camera.release()
                    break
Example #40
def match_card(value_image,
               suit_image,
               is_it_table_card,
               VALUE_DIFFERENCE_LIMIT=830,
               SUIT_DIFFERENCE_LIMIT=330):

    #All calculated difference amounts are lower than this large number.
    best_value_difference_amount = 1000000
    best_suit_difference_amount = 1000000
    best_value_match_name = "Unknown"
    best_suit_match_name = "Unknown"

    for value_name in [
            'Ace', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight',
            'Nine', 'Ten', 'Jack', 'Queen', 'King'
    ]:

        if is_it_table_card == True:
            image_path = os.path.abspath(os.path.dirname(__file__)) +\
            "/Source Card Images for Celeb/Table Cards/%s.png"%value_name

        elif is_it_table_card == False:
            image_path = os.path.abspath(os.path.dirname(__file__)) +\
            "/Source Card Images for Celeb/My Cards/%s.png"%value_name

        value_source_image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)

        if isinstance(value_source_image, type(None)):
            raise Exception("Unable to read %s.png value source image" %
                            value_name)
        #comparing 2 images; value_image and value_source_image sizes must be equal, otherwise it errors
        difference_image = cv2.absdiff(value_image, value_source_image)
        difference_amount = int(np.sum(difference_image) / 255)

        if difference_amount < best_value_difference_amount:
            #best_value_difference_image = difference_image
            best_value_difference_amount = difference_amount
            best_value_name = value_name

    for suit_name in ['Spade', 'Heart', 'Club', 'Diamond']:

        if is_it_table_card == True:
            image_path = os.path.abspath(os.path.dirname(__file__)) +\
            "/Source Card Images for Celeb/Table Cards/%s.png"%suit_name
        elif is_it_table_card == False:
            image_path = os.path.abspath(os.path.dirname(__file__)) +\
            "/Source Card Images for Celeb/My Cards/%s.png"%suit_name
        suit_source_image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)

        if isinstance(suit_source_image, type(None)):
            raise Exception("Unable to read %s.png suit source image" %
                            suit_name)
        #comparing 2 images; suit_image and suit_source_image sizes must be equal, otherwise it errors
        difference_image = cv2.absdiff(suit_image, suit_source_image)
        difference_amount = int(np.sum(difference_image) / 255)

        if difference_amount < best_suit_difference_amount:
            #best_suit_difference_image = difference_image
            best_suit_difference_amount = difference_amount
            best_suit_name = suit_name

    if (best_value_difference_amount < VALUE_DIFFERENCE_LIMIT):
        best_value_match_name = best_value_name
    if (best_suit_difference_amount < SUIT_DIFFERENCE_LIMIT):
        best_suit_match_name = best_suit_name

    return best_value_match_name, best_suit_match_name, best_value_difference_amount, best_suit_difference_amount
Example #41
while True:
    check, frame = video.read(
    )  # check reports whether the camera is active; video.read() captures the image and stores the data in a numpy array
    #print(check)
    # print(frame)  # just printing the array of the image (a numpy array)
    gray = cv2.cvtColor(
        frame, cv2.COLOR_BGR2GRAY
    )  # we simply convert our image to a black & white model so that face detection becomes easier
    gray = cv2.GaussianBlur(
        gray, (21, 21), 0
    )  # this helps our program detect any object easily... for more info google it...
    if first_frame is None:
        first_frame = gray
        continue  # the first pass only records the background in first_frame
    # difference image between the stored background and the current frame;
    # any object entering the scene changes the pixel values and shows up here
    delta_frame = cv2.absdiff(first_frame, gray)
    # binarize the difference: pixels that changed by more than 30 become
    # white (255); threshold() returns (retval, image), so take index [1]
    thresh_frame = cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1]
    thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)
    (_, cnts, _) = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
    for contours in cnts:
        if cv2.contourArea(contours) < 1000:
            continue
        (x, y, w, h) = cv2.boundingRect(contours)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
    cv2.imshow("Back ground", delta_frame)
    cv2.imshow("Capture", gray)
Exemple #42
0
        colorizer = rs.colorizer()
        colorized_depth = np.asanyarray(
            colorizer.colorize(depth_frame).get_data())

        # Create alignment primitive with color as its target stream:
        align = rs.align(rs.stream.color)
        frameset = align.process(frameset)

        # Update color and depth frames:
        aligned_depth_frame = frameset.get_depth_frame()
        colorized_depth = np.asanyarray(
            colorizer.colorize(aligned_depth_frame).get_data())

        ### motion detector
        d = cv2.absdiff(color_init, color)
        gray = cv2.cvtColor(d, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(gray, (5, 5), 0)
        _, th = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
        dilated = cv2.dilate(th, np.ones((3, 3), np.uint8), iterations=3)
        (c, _) = cv2.findContours(dilated, cv2.RETR_TREE,
                                  cv2.CHAIN_APPROX_SIMPLE)
        # cv2.drawContours(color, c, -1, (0, 255, 0), 2)
        color_init = color

        depth = np.asanyarray(aligned_depth_frame.get_data())

        for contour in c:
            if cv2.contourArea(contour) < 1500:
                continue
            (x, y, w, h) = cv2.boundingRect(contour)
import cv2
import numpy as np
from threading import Thread
from playsound import playsound

cap = cv2.VideoCapture(1)
k = True
key_threshold = 1200
ret, t0 = cap.read()
ret, t1 = cap.read()
t0 = cv2.cvtColor(t0, cv2.COLOR_BGR2GRAY)

while True:
    t1 = cv2.cvtColor(t1, cv2.COLOR_BGR2GRAY)
    diff = cv2.absdiff(t1, t0)
    ret, threshold_image = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)
    # count the changed (white) pixels in each of 14 fixed key bands along
    # one row strip of the frame
    key1 = cv2.countNonZero(threshold_image[355:410, 0:35])
    key2 = cv2.countNonZero(threshold_image[355:410, 40:85])
    key3 = cv2.countNonZero(threshold_image[355:410, 90:130])
    key4 = cv2.countNonZero(threshold_image[355:410, 135:183])
    key5 = cv2.countNonZero(threshold_image[355:410, 183:230])
    key6 = cv2.countNonZero(threshold_image[355:410, 230:275])
    key7 = cv2.countNonZero(threshold_image[355:410, 275:325])
    key8 = cv2.countNonZero(threshold_image[355:410, 325:370])
    key9 = cv2.countNonZero(threshold_image[355:410, 370:420])
    key10 = cv2.countNonZero(threshold_image[355:410, 420:470])
    key11 = cv2.countNonZero(threshold_image[355:410, 470:515])
    key12 = cv2.countNonZero(threshold_image[355:410, 515:560])
    key13 = cv2.countNonZero(threshold_image[355:410, 560:610])
    key14 = cv2.countNonZero(threshold_image[355:410, 610:638])
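
    # A hedged alternative to the fourteen near-identical lines above: the
    # same per-key counts via a list of the column boundaries copied from the
    # code; key_counts[i] corresponds to key(i+1).
    key_bands = [(0, 35), (40, 85), (90, 130), (135, 183), (183, 230),
                 (230, 275), (275, 325), (325, 370), (370, 420), (420, 470),
                 (470, 515), (515, 560), (560, 610), (610, 638)]
    key_counts = [cv2.countNonZero(threshold_image[355:410, x0:x1])
                  for x0, x1 in key_bands]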
Exemple #44
0
def main(textOut, textIn):

    det.__init__()
    camera = cv2.VideoCapture("ch12_20190703090318.mp4")

    firstFrame = None

    # loop over the frames of the video
    while True:
        # grab the current frame
        (grabbed, frame) = camera.read()

        # if the frame could not be grabbed, then we have reached the end
        # of the video
        if not grabbed:
            break

        # resize the frame, convert it to grayscale, and blur it
        frame = imutils.resize(frame, width=width)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.imshow('', gray)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)
        #cv2.imshow('',gray)

        # if the first frame is None, initialize it and treat it as the background
        if firstFrame is None:
            firstFrame = gray
            continue

        # compute the absolute difference between the current frame and the
        # background (first frame)
        frameDelta = cv2.absdiff(firstFrame, gray)
        thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
        # dilating the thresholded image
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[0]
        #print (cnts)
        # loop over the contours
        xset = 0
        rects = []
        start = 0
        # define the movement counters before the contour loop so det.update()
        # below always receives initialized values, even when cnts is empty
        up = 0
        down = 0
        for c in cnts:
            #rects.append(c.astype("int"))

            xset += 1
            #print(c)
            # to track the movement direction of the object
            up = 0
            down = 0
            movin = 0

            # if the contour is too small, ignore it

            #if cv2.contourArea(c) < 12000:
            #continue
            # compute the bounding box for the contour, draw it on the frame,
            cv2.line(frame, (450, 300), (700, 300), (0, 0, 255), 2)  # red line
            (x, y, w, h) = cv2.boundingRect(c)
            #print(cv2.boundingRect(c))
            rects.append(cv2.boundingRect(c))
            #print(rects[0])
            #print(enumerate(rects))

            #start = 1
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            #cv2.line(frame, (450, 300), (700, 300), (250, 0, 1), 2)  # blue line
            #            cv2.line(frame, (450,300), (700,300), (0, 0, 255), 2)  # red line

            rectagleCenterPont = ((x + x + w) // 2, (y + y + h) // 2)
            cv2.circle(frame, rectagleCenterPont, 1, (0, 0, 255), 5)
            '''print(rectagleCenterPont)
            if(rectagleCenterPont[1]<300):
                down = 1
                movin = 0
            else:
                up = 1
                movin = 1
            print(down, up, movin)
            #check for in and out'''
        _, up, down = det.update(rects, up, down)
        textIn += down
        textOut += up

        # show the frame and record if the user presses a key
        cv2.imshow("Thresh", thresh)
        cv2.imshow("Frame Delta", frameDelta)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        cv2.putText(frame, "In: {}".format(str(textIn)), (10, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        cv2.putText(frame, "Out: {}".format(str(textOut)), (10, 70),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        cv2.putText(frame,
                    datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                    (0, 0, 255), 1)
        cv2.imshow("Security Feed", frame)

    #close any open windows
    camera.release()
    cv2.destroyAllWindows()
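
# det.update() above is external and not shown; a minimal hypothetical sketch
# of one way such an in/out counter can work: compare each tracked centroid's
# previous and current y against the reference line at y = 300.
LINE_Y = 300

def count_crossing(prev_cy, cy, text_in, text_out):
    # hypothetical helper, not the det implementation used above
    if prev_cy < LINE_Y <= cy:
        text_in += 1   # centroid moved downward across the line
    elif prev_cy >= LINE_Y > cy:
        text_out += 1  # centroid moved upward across the line
    return text_in, text_out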
                              cv2.VideoWriter_fourcc('D', 'I', 'V', 'X'), fps,
                              size)

while True:
    check, frame = video.read()
    status = -1
    text = 'Unoccupied'
    grayImg = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    grayImg = cv2.GaussianBlur(grayImg, (21, 21), 0)

    if first_frame is None:
        first_frame = grayImg

    deltaFrame = cv2.absdiff(first_frame, grayImg)
    threshFrame = cv2.threshold(deltaFrame, 30, 255, cv2.THRESH_BINARY)[1]
    threshFrame = cv2.dilate(threshFrame, None, iterations=2)

    (cnts, _) = cv2.findContours(threshFrame.copy(), cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)

    for contour in cnts:
        if cv2.contourArea(contour) < 10000:
            continue
        status = 1

        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)

        text = 'Occupied'
    def diff_img(self):
        f0, f1, f2 = self.three_frames()
        d1 = cv2.absdiff(f1, f0)
        d2 = cv2.absdiff(f2, f1)
        self.diff = cv2.bitwise_and(d1, d2)
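
# A usage sketch for the three-frame differencing above, assuming
# self.three_frames() is backed by something like a deque of the last three
# grayscale captures; collections.deque(maxlen=3) drops the oldest frame
# automatically.
from collections import deque

frames = deque(maxlen=3)
# inside a capture loop:
#     frames.append(gray_frame)
#     if len(frames) == 3:
#         motion = cv2.bitwise_and(cv2.absdiff(frames[1], frames[0]),
#                                  cv2.absdiff(frames[2], frames[1]))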
Exemple #47
0
        #
        #       print(avg.shape)
        #print(avg_show.shape)

        cur_back = avg
        if flag == 0:
            #buf_back[:] = 0  # not needed: buf_back is initialized as a zero matrix
            flag = 10  # to rule out the case where nframe == 1
        if flag == 10 and count[i - 1] >= nframe:
            # once the first nframe frames have been processed, store the
            # current background (taken just after the warm-up) in buf_back
            buf_back[i - 1] = cur_back
            flag = 20

        # difference between the stored background and the frame being processed
        sub = cv2.absdiff(cur_back, buf_back[i - 1])

        img_show = cv2.resize(img, (400, 400))
        #img_show = img_show.astype(int)
        #print(img_show.shape)
        #print(img_show)
        #time.sleep(1)
        cv2.imshow("img", img_show)  # affichage de l'image originale

        gray_show = cv2.resize(gray, (400, 400))
        #gray_show = gray_show.astype(int)
        cv2.imshow("gray", gray_show)

        #print(cur_back)
        cur_back_show = cv2.resize(cur_back, (400, 400))
	frame = imutils.resize(frame, width=500)
	gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
	gray = cv2.GaussianBlur(gray, (21, 21), 0)
 
	# if the average frame is None, initialize it
	if avg is None:
		print ("[INFO] starting background model...")
		avg = gray.copy().astype("float")
		rawCapture.truncate(0)
		continue
 
	# accumulate the weighted average between the current frame and
	# previous frames, then compute the difference between the current
	# frame and running average
	cv2.accumulateWeighted(gray, avg, 0.5)
	frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
	
	# threshold the delta image, dilate the thresholded image to fill
	# in holes, then find contours on thresholded image
	thresh = cv2.threshold(frameDelta, 5, 255,
		cv2.THRESH_BINARY)[1]
	thresh = cv2.dilate(thresh, None, iterations=2)
	#(cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
	_, cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
 
	# loop over the contours
	for c in cnts:
		# if the contour is too small, ignore it
		if cv2.contourArea(c) < 5000:
			continue
 
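
# A small numeric sketch of the running-average update used above:
# cv2.accumulateWeighted(src, dst, alpha) computes dst = (1 - alpha) * dst + alpha * src,
# so alpha = 0.5 pulls the background halfway toward each new frame.
import cv2
import numpy as np

avg = np.full((2, 2), 100.0)                # running background, must be float
frame = np.full((2, 2), 200.0, np.float32)  # incoming frame
cv2.accumulateWeighted(frame, avg, 0.5)
print(avg)  # all 150.0: halfway between the old average and the new frame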
    def video_thread(self):
        # open midi port
        self.out_port = mido.open_output('Output',
                                         client_name='Motion Detector (OUT)')
        logging.info('Output port: {}'.format(self.out_port))

        camera = cv2.VideoCapture(0)
        time.sleep(0.25)

        if self.conf.C_DISPLAY_VIDEO == 1:
            cv2.namedWindow("M2M Motion", cv2.WINDOW_NORMAL)

        # initialize the first frame in the video stream
        previousFrame = None
        gray = None

        # loop over the frames of the video
        while self.gRun:
            (grabbed, frame) = camera.read()

            # if the frame could not be grabbed, then we have reached the end
            # of the video
            if not grabbed:
                break

            # save the previous frame and grab a new
            previousFrame = gray

            # resize the frame, convert it to grayscale, and blur it
            frame = imutils.resize(frame, width=500)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (7, 7), 0)

            # skip diff if this was the first frame
            if previousFrame is None:
                continue

            # compute the absolute difference between the current frame and
            # previous frame
            frameDelta = cv2.absdiff(previousFrame, gray)
            thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]

            # dilate the thresholded image to fill in holes, then find contours
            # on thresholded image
            thresh = cv2.dilate(thresh, None, iterations=2)

            # compress image array to one int
            currentChange = sum(sum(thresh))

            # update the highest found if needed
            if currentChange >= self.gHighestSeenChange:
                self.gHighestSeenChange = currentChange

            # calculate the amount of change and map it onto the MIDI range (0-127)
            percent = float(currentChange) / float(self.gHighestSeenChange)
            self.gMidiChange = int(percent * 127)

            # send a MIDI message based on timing
            if self.conf.C_TRIGGER_BY_TIMING == 1:
                if self.gSync == 0:
                    self.gSync = self.conf.C_VIDEO_FPS / self.conf.C_MIDI_MPS
                    logging.debug("Sending " + str(self.gMidiChange))
                    cc = Message('control_change',
                                 channel=13,
                                 control=1,
                                 value=int(self.gMidiChange))
                    self.out_port.send(cc)
                else:
                    self.gSync = self.gSync - 1

            # slowly readjust the highest found
            if self.conf.C_READJUST_AMOUNT != 0 and self.gHighestSeenChange >= int(
                    self.conf.C_READJUST_AMOUNT):
                self.gHighestSeenChange = self.gHighestSeenChange - self.conf.C_READJUST_AMOUNT

            # show display if needed
            if self.conf.C_DISPLAY_VIDEO == 1:
                cv2.putText(thresh,
                            "Movement in MIDI: {}".format(self.gMidiChange),
                            (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                            (255, 255, 255), 2)
                cv2.imshow("M2M Motion", thresh)

            # check for keyboard input
            cv2.waitKey(1) & 0xFF

            ms = 1000 / self.conf.C_VIDEO_FPS
            time.sleep(ms / 1000.0)  # 1000.0 because we want a float

        # cleanup the camera and close any open windows
        camera.release()
        cv2.destroyAllWindows()
        logging.info("Leaving Video thread.")
Exemple #50
0
def CamMovement(qc):
    global Cam_Run
    firstFrame = None
    hits = 0  # counter for us to cycle over files

    CHNG_THRESH = 65  # Change Threshold used to be 25

    HR_Cam = qc.get()

    cc = qc.get()
    i = 0

    vs = []  # init VS array

    while i < cam_count:
        vs.append(cv2.VideoCapture(i))
        if not vs[i].isOpened():
            print('Could not open webcam #' + str(i) + ' \n')
            vs[i].release()
            vs.pop(i)
            i = i - 1
            break
        i = i + 1

    signal.signal(signal.SIGTERM, sigterm_cam)
    signal.signal(signal.SIGINT, sigterm_cam)

    while Cam_Run:

        # grab the current frame and initialize the occupied/unoccupied text
        retval, frame = vs[HR_Cam].read()
        text = "Unoccupied"

        # if the frame could not be grabbed, then we have reached the end
        # of the video

        if frame is None:
            break

        # resize the frame, convert it to grayscale, and blur it
        frame = imutils.resize(frame, width=500)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        # if the first frame is None, initialize it
        if firstFrame is None:
            firstFrame = gray
            continue

        # compute the absolute difference between the current frame and
        # first frame
        frameDelta = cv2.absdiff(firstFrame, gray)
        thresh = cv2.threshold(frameDelta, CHNG_THRESH, 255,
                               cv2.THRESH_BINARY)[1]

        # dilate the thresholded image to fill in holes, then find contours
        # on thresholded image
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]

        # loop over the contours
        Caption = "Empty"
        for c in cnts:

            # compute the bounding box for the contour, draw it on the frame,
            # and update the text
            (x, y, w, h) = cv2.boundingRect(c)
            if (w > 10) and (h > 10):  # trying to eliminate tiny changes
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                text = "Occupied"
                Caption = text + ' !'

        # draw the text and timestamp on the frame
        cv2.putText(frame, "Room Status: {}".format(Caption), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        cv2.putText(frame,
                    datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                    (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35,
                    (0, 0, 255), 1)

        # show the frame
        if text == "Occupied":
            cv2.imwrite(str(hits) + '_Security' + '.png', frame)
            for x in range(cc):
                if x != HR_Cam:
                    retval, frame = vs[x].read()
                    Caption = str(x)
                    cv2.putText(frame, "Camera: {}".format(Caption), (10, 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                    cv2.putText(
                        frame,
                        datetime.datetime.now().strftime(
                            "%A %d %B %Y %I:%M:%S%p"),
                        (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
                        0.35, (0, 0, 255), 1)
                    cv2.imwrite(
                        str(hits) + '_Cam' + str(x) + '_' + '.png', frame)
            hits = hits + 1
            qc.put([True, datetime.datetime.now()])

            sleep(4)  # give it 4 secs before you grab more frames

        if hits > 19:  # recycle videos so as not to eat space
            hits = 0
    # release every camera handle that was opened
    for v in vs:
        v.release()
    vs.clear()
            def_wt = INITIAL_AVERAGE_WEIGHT
        else:
            def_wt = DEFAULT_AVERAGE_WEIGHT

        cv2.accumulateWeighted(grayFrame, avg, def_wt)

        # export averaged background for use in next video feed run
        # if frame_no > int(total_frames * 0.975):
        if frame_no > 200:
            grayOp = cv2.cvtColor(cv2.convertScaleAbs(avg), cv2.COLOR_GRAY2BGR)
            backOut = loc + "/backgrounds/" + camera + "_bg.jpg"
            cv2.imwrite(backOut, grayOp)

        # Compute the grayscale difference between the current grayscale frame and
        # the average of the scene.
        differenceFrame = cv2.absdiff(grayFrame, cv2.convertScaleAbs(avg))
        # blur the difference image
        differenceFrame = cv2.GaussianBlur(differenceFrame, (5, 5), 0)
        #        cv2.imshow("difference", differenceFrame)
        diffout = cv2.cvtColor(differenceFrame, cv2.COLOR_GRAY2BGR)
        diffop.write(diffout)

        # get estimated otsu threshold level
        retval, _ = cv2.threshold(differenceFrame, 0, 255,
                                  cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        # add to list of threshold levels
        t_retval.append(retval)

        # apply threshold based on average threshold value
        if frame_no < 10:
            ret2, thresholdImage = cv2.threshold(differenceFrame,
Exemple #52
0
def diff(present_frame, last_frame):
    '''
    Target:
        Predict the position of a moving object by differencing two
        consecutive frames plus a few OpenCV operations. We only care about
        the left-most motion blob, which indicates how far the hand/object
        reaches; when the box (or its corner (xmin, ymin)) shows up on the
        left side many times in a row, the state can switch to "hand in".
    Args:
        inputs:
            present_frame
            last_frame
        outputs:
            bbox: (xmin, ymin, xmax, ymax) of the left-most motion blob
    To do: copy present_gray into last_gray so the same frame is not
        color-converted twice.
    '''
    # xmin defaults to the full frame width, i.e. "no blob found yet"
    xmin = inWidth
    ymin = None
    xmax = None
    ymax = None

    present_gray = cv.cvtColor(present_frame, cv.COLOR_BGR2GRAY)
    present_gray = cv.GaussianBlur(present_gray, (21, 21), 0)
    last_gray = cv.cvtColor(last_frame, cv.COLOR_BGR2GRAY)
    last_gray = cv.GaussianBlur(last_gray, (21, 21), 0)

    frame_delta = cv.absdiff(last_gray, present_gray)
    frame_delta = cv.threshold(
        frame_delta, 25, 255,
        cv.THRESH_BINARY)[1]  # this threshold may need tuning
    # show the diff image before any morphology
    # cv.imshow('diff_1', frame_delta)

    # morphological cleanup (may need tuning): erode to kill specks, then
    # dilate to merge what survives into solid blobs
    # frame_delta = cv.erode(frame_delta, cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3)), iterations=2)
    # frame_delta = cv.dilate(frame_delta, cv.getStructuringElement(cv.MORPH_ELLIPSE, (8, 3)), iterations=2)
    frame_delta = cv.erode(frame_delta, None, iterations=7)
    frame_delta = cv.dilate(frame_delta, None, iterations=10)
    # show the diff image after morphology
    # cv.imshow("diff_2", frame_delta)

    # find connected components and track the left-most bounding box
    (_, cnts, _) = cv.findContours(frame_delta.copy(), cv.RETR_CCOMP,
                                   cv.CHAIN_APPROX_SIMPLE)
    for c in cnts:
        if cv.contourArea(c) < 20:  # area threshold; may need tuning
            continue
        (x, y, w, h) = cv.boundingRect(c)
        #print(cv.contourArea(c))
        if x < xmin:
            xmin = x
            ymin = y
            xmax = x + w
            ymax = y + h
        # cv.rectangle(frame_delta, (x, y), (x + w, y + h), (0, 255, 0), 5)  # drawing skipped for speed; uncomment to visualize
    # show the diff image with morphology and the rect bbox
    # cv.imshow("diff_3", frame_delta)
    return xmin, ymin, xmax, ymax
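
# A hedged usage sketch for diff() above; inWidth is the module-level frame
# width that xmin falls back to when no blob is found, so ymin doubles as the
# "found anything" flag.
# xmin, ymin, xmax, ymax = diff(present_frame, last_frame)
# if ymin is not None:
#     cv.rectangle(present_frame, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)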
Exemple #53
0
import cv2
import numpy as np

I = cv2.imread('input/in000301.jpg')
I = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)
I = cv2.threshold(I, 100, 255, cv2.THRESH_BINARY)[1]

for i in range(1, 150):
    I_prev = I
    I_clr = cv2.imread('input/in000' + str(300 + i) + '.jpg')
    I = cv2.cvtColor(I_clr, cv2.COLOR_BGR2GRAY)
    I_diff = cv2.absdiff(I, I_prev)
    I_diff = cv2.threshold(I_diff, 40, 255, cv2.THRESH_BINARY)[1]
    I_diff = cv2.medianBlur(I_diff, 3)
    I_diff = cv2.dilate(I_diff,
                        cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7)),
                        iterations=3)
    I_diff = cv2.erode(I_diff,
                       cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))
    retval, labels, stats, centroids = cv2.connectedComponentsWithStats(I_diff)
    # cv2.imshow("Labels", np.uint8(labels / stats.shape[0]*255))
    if stats.shape[0] > 1:  # are there any objects besides the background?
        pi, p = max(enumerate(stats[1:, 4]), key=(lambda x: x[1]))
        pi = pi + 1
        # draw the bbox of the largest component (label 0 is the background)
        cv2.rectangle(
            I_clr, (stats[pi, 0], stats[pi, 1]),
            (stats[pi, 0] + stats[pi, 2], stats[pi, 1] + stats[pi, 3]),
            (255, 0, 0), 2)
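
        # The magic column indices above can be written with OpenCV's named
        # constants instead (a readability sketch, same behaviour): columns
        # 0-4 of stats are CC_STAT_LEFT, CC_STAT_TOP, CC_STAT_WIDTH,
        # CC_STAT_HEIGHT and CC_STAT_AREA, so the largest foreground label
        # could also be selected with:
        # pi = 1 + int(np.argmax(stats[1:, cv2.CC_STAT_AREA]))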
Exemple #54
0
import cv2
import numpy as np
cap = cv2.VideoCapture('k.avi')
_, frame1 = cap.read()
_, frame2 = cap.read()
while True:
    diff = cv2.absdiff(frame1, frame2)
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, None, iterations=3)
    contours, _ = cv2.findContours(dilated, cv2.RETR_TREE,
                                   cv2.CHAIN_APPROX_SIMPLE)
    # cv2.drawContours(frame1,contours,-1,(0,244,0),2)
    for cont in contours:
        (x, y, w, h) = cv2.boundingRect(cont)
        if cv2.contourArea(cont) < 700:
            continue
        cv2.rectangle(frame1, (x, y), (x + w, y + h), (124, 233, 44), 2)
        print(frame1.shape)
        cv2.putText(frame1, "Status: {}".format('Movement'), (10, 20),
                    cv2.FONT_HERSHEY_COMPLEX, 1, (0, 200, 0), 2)
        cv2.putText(frame1, "Made by KAMI_360 Using Open-cv/python",
                    (190, 520), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0),
                    2)
        cv2.putText(frame1, "KAMI_360 Kindly Alert", (10, 70),
                    cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (0, 0, 0), 1)
    cv2.imshow('Detected', frame1)
    frame1 = frame2
    ret, frame2 = cap.read()
    if cv2.waitKey(5) == 27:  # exit on Esc
        break
def test_pair(transform, jpg, mat, out):
    """
        jpg = filename
        mat = filename
    """

    data = scipy.io.loadmat(mat)
    regions = data['regions'].flatten()
    max_type = 0
    for r in regions:
        max_type = max(max_type, r['type'][0][0][0][0])
    r_vals = {}

    for t in np.arange(max_type):
        r_vals[t + 1] = np.array([], 'float32')

    g_vals = copy.deepcopy(r_vals)
    b_vals = copy.deepcopy(r_vals)
    h_vals = copy.deepcopy(r_vals)
    s_vals = copy.deepcopy(r_vals)
    v_vals = copy.deepcopy(r_vals)

    result_stats = {
        'average_abs_err': [],
        'total_pixels': 0,
        'total_error': 0,
        'total_regions': 0,
        'r_vals': r_vals,
        'g_vals': g_vals,
        'b_vals': b_vals,
        'h_vals': h_vals,
        's_vals': s_vals,
        'v_vals': v_vals
    }
    for r in regions:
        logger.info('region')
        x = r['x'][0][0].flatten()
        y = r['y'][0][0].flatten()
        mask = r['mask'][0][0]
        mask3 = cv2.merge([mask, mask, mask])
        print('x', x)
        print('y', y)
        print('mask shape', mask.shape)
        # type in 1- based / matlab-based indices from the list of region types (i.e road, white,
        # yellow, red, or what ever types were annotated)
        print('type', r['type'][0][0][0][0])
        # color in [r,g,b] where [r,g,b]are between 0 and 1
        print('color', r['color'][0])
        t = r['type'][0][0][0][0]
        region_color = r['color'][0]
        region_color = region_color[0][0]
        rval = region_color[0] * 255.
        gval = region_color[1] * 255.
        bval = region_color[2] * 255.
        image = dtu.image_cv_from_jpg_fn(jpg)
        transformed = transform(image)
        [b2, g2, r2] = cv2.split(transformed)
        thsv = cv2.cvtColor(transformed, cv2.COLOR_BGR2HSV)
        [h2, s2, v2] = cv2.split(thsv)
        r2_ = r2[mask.nonzero()]
        g2_ = g2[mask.nonzero()]
        b2_ = b2[mask.nonzero()]
        h2_ = h2[mask.nonzero()]
        s2_ = s2[mask.nonzero()]
        v2_ = v2[mask.nonzero()]

        result_stats['r_vals'][t] = np.concatenate(
            (result_stats['r_vals'][t], r2_), 0)
        result_stats['g_vals'][t] = np.concatenate(
            (result_stats['g_vals'][t], g2_), 0)
        result_stats['b_vals'][t] = np.concatenate(
            (result_stats['b_vals'][t], b2_), 0)
        result_stats['h_vals'][t] = np.concatenate(
            (result_stats['h_vals'][t], h2_), 0)
        result_stats['s_vals'][t] = np.concatenate(
            (result_stats['s_vals'][t], s2_), 0)
        result_stats['v_vals'][t] = np.concatenate(
            (result_stats['v_vals'][t], v2_), 0)
        absdiff_img = cv2.absdiff(transformed, np.array([bval, gval, rval,
                                                         0.]))
        masked_diff = cv2.multiply(np.array(absdiff_img, 'float32'),
                                   np.array(mask3, 'float32'))
        num_pixels = cv2.sumElems(mask)[0]
        region_error = cv2.sumElems(cv2.sumElems(masked_diff))[0]
        avg_abs_err = region_error / (num_pixels + 1.)
        print('Average abs. error', avg_abs_err)
        result_stats['average_abs_err'].append(avg_abs_err)
        result_stats['total_pixels'] += num_pixels
        result_stats['total_error'] += region_error
        result_stats['total_regions'] += 1
        # XXX: to finish
    return result_stats
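
# A compact numpy sketch of the per-region error computed above, assuming a
# float BGR image, a target (b, g, r) colour and a binary mask; it matches
# region_error / (num_pixels + 1.) including the same +1 smoothing term.
import numpy as np

def masked_avg_abs_err(image, bgr_color, mask):
    diff = np.abs(image.astype('float32') - np.array(bgr_color, 'float32'))
    per_pixel = diff.sum(axis=2) * (mask > 0)  # zero out pixels outside the region
    return per_pixel.sum() / (np.count_nonzero(mask) + 1.0)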
Exemple #56
0
def cameraLoop(camera):
    # initialize the first frame in the video stream
    firstFrame = None
    testOccupied = "false"
    firstOccupied = 0
    testOccupiedCounter = 0
    # loop over the frames of the video
    while True:
        # grab the current frame and initialize the occupied/unoccupied
        # text
        (grabbed, frame) = camera.read()
        text = "Unoccupied"
        frame = frame[25:500, 110:475]
        # resize the frame, convert it to grayscale, and blur it
        frame = imutils.resize(frame, width=500)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)
        # if the first frame is None, initialize it
        if firstFrame is None:
            firstFrame = gray
            continue

        # compute the absolute difference between the current frame and
        # first frame
        frameDelta = cv2.absdiff(firstFrame, gray)
        thresh = cv2.threshold(frameDelta, 50, 255, cv2.THRESH_BINARY)[1]

        # dilate the thresholded image to fill in holes, then find contours
        # on thresholded image
        thresh = cv2.dilate(thresh, None, iterations=2)
        (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_SIMPLE)

        # loop over the contours
        for c in cnts:
            # if the contour is too small, ignore it
            if cv2.contourArea(c) < 1000:
                continue

            # compute the bounding box for the contour, draw it on the frame,
            # and update the text

            # (x, y, w, h) = cv2.boundingRect(c)
            # cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            text = "Occupied"

        # draw the text and timestamp on the frame
        # cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
        # cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)


        # show the frame and record if the user presses a key
        cv2.imshow("Security Feed", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key is pressed, break from the loop
        if key == ord("q"):
            camera.release()
            cv2.destroyAllWindows()
            exit()
        if (firstOccupied == 0) and (text == "Occupied") and (testOccupied != "testing"):
            firstOccupied = 1
        if firstOccupied == 1:
            testOccupied = "testing"
            testOccupiedCounter = 0
            firstOccupiedFrame = gray
            firstOccupied = 0
        if testOccupied == "testing":
            frameDelta = cv2.absdiff(firstOccupiedFrame, gray)
            thresh = cv2.threshold(frameDelta, 100, 255, cv2.THRESH_BINARY)[1]

            # dilate the thresholded image to fill in holes, then find contours
            # on thresholded image
            thresh = cv2.dilate(thresh, None, iterations=2)
            (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)
            temptestOccupiedCheck = "true"
            # loop over the contours
            for c in cnts:
                # if the contour is too small, ignore it
                if cv2.contourArea(c) > 500:
                    temptestOccupiedCheck = "false"
                    break

            if temptestOccupiedCheck == "false":
                testOccupiedCounter = 0
                testOccupied = "false"
                firstOccupied = 0
            else:
                testOccupiedCounter = testOccupiedCounter + 1

        # if text== "Occupied":
        #     stablecounter = stablecounter+1
        if (text == "Occupied") & (testOccupiedCounter == 50):
            cv2.imshow("test", frame)
            cv2.imwrite("test.jpg", frame)
            with open("test.jpg", "rb") as imageFile:
                f = imageFile.read()
                b = bytearray(f)
            imageFile.close();
            data = visionAPIImg(b)
            digit = isRecycable(data)
            print(digit)
            arduino.write(str(digit))
            break
baseimage_change = datetime.datetime.now() + datetime.timedelta(minutes=.5)
print('starting endtime: ', endTime)
while checker:
    status = 0
    check, frame = video.read()
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray_frame = cv2.GaussianBlur(gray_frame, (25, 25), 0)

    if baseline_image is None or datetime.datetime.now() >= baseimage_change:
        baseline_image = gray_frame
        baseimage_change = datetime.datetime.now() + datetime.timedelta(
            minutes=.5)
        print('baseline_frame_changed')
        continue

    delta = cv2.absdiff(baseline_image, gray_frame)
    threshold = cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)[1]
    (contours, _) = cv2.findContours(threshold, cv2.RETR_EXTERNAL,
                                     cv2.CHAIN_APPROX_SIMPLE)
    font = cv2.FONT_HERSHEY_SIMPLEX
    if not contours:
        cv2.putText(frame, 'no motion detected!', (0, 50), font, 1,
                    (200, 255, 155), 2, cv2.LINE_AA)
        if datetime.datetime.now() >= endTime:
            print('No motion detected for last one minute: ',
                  datetime.datetime.now())
            print('[info]:storing in database')
            if status == 1:
                times.append(datetime.datetime.now())
            checker = False
            break
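
# A minimal sketch of the timed baseline refresh above: keep the next refresh
# time alongside the baseline and re-seed both whenever the clock passes it.
import datetime

REFRESH = datetime.timedelta(minutes=0.5)

def maybe_refresh(baseline, next_change, current_gray):
    now = datetime.datetime.now()
    if baseline is None or now >= next_change:
        return current_gray, now + REFRESH  # new baseline, new deadline
    return baseline, next_change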
Exemple #58
0
    # if no frame could be grabbed, there is nothing more to do
    if not grabbed:
        break

    # convert the frame to grayscale and apply a blur (to improve contour detection)
    FrameGray = cv2.cvtColor(Frame, cv2.COLOR_BGR2GRAY)
    FrameGray = cv2.GaussianBlur(FrameGray, (21, 21), 0)

    # since the comparison is made between two consecutive images, the reference
    # frame is initialized on the first pass through the loop (when it is still null)
    if PrimeiroFrame is None:
        PrimeiroFrame = FrameGray
        continue

    # compute the absolute difference between the initial frame and the current
    # frame (background subtraction), then binarize the result
    FrameDelta = cv2.absdiff(PrimeiroFrame, FrameGray)
    FrameThresh = cv2.threshold(FrameDelta, ThresholdBinarizacao, 255, cv2.THRESH_BINARY)[1]
    
    # dilate the binarized frame to remove "holes" / gaps inside detected
    # contours, so each detected object is treated as a single solid blob;
    # then find the contours after dilation
    FrameThresh = cv2.dilate(FrameThresh, None, iterations=2)
    cnts, _ = cv2.findContours(FrameThresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    QtdeContornos = 0

    # draw the reference lines
    CoordenadaYLinhaEntrada = int((height / 2)-OffsetLinhasRef)
    CoordenadaYLinhaSaida = int((height / 2)+OffsetLinhasRef)
    cv2.line(Frame, (0,CoordenadaYLinhaEntrada), (width,CoordenadaYLinhaEntrada), (255, 0, 0), 2)
    cv2.line(Frame, (0,CoordenadaYLinhaSaida), (width,CoordenadaYLinhaSaida), (0, 0, 255), 2)
Exemple #59
0
        break

    # resize the frame to width 640, convert it to grayscale, and blur it
    frame = imutils.resize(frame, width=640)

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # if the first frame is None, initialize it
    if anchorFrame is None:
        anchorFrame = gray
        continue

    # compute the absolute difference between the current frame and the anchor
    # frame (the delta is empty on the first frame, so deltas start at frame 2)
    frameDelta = cv2.absdiff(anchorFrame, gray)

    # update anchor frame every 10 frames
    if countframe % 10 == 0:
        anchorFrame = gray

    thresh = cv2.threshold(frameDelta, 10, 255, cv2.THRESH_BINARY)[
        1]  # frame difference intensity set to be 10 or larger

    # dilate the thresholded image to fill in holes, then find contours on thresholded image
    thresh = cv2.dilate(thresh, None, iterations=2)
    #(cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    tmp1, cnts, tmp2 = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)
Exemple #60
-2
import cv2
import numpy as np
import pywt


def image_diff_dwt(lhs_image, rhs_image) -> float:
    # compare the Haar-wavelet detail bands (horizontal, vertical, diagonal)
    # of two same-size images; the approximation band is deliberately ignored
    _, (lhs_LH, lhs_HL, lhs_HH) = pywt.dwt2(lhs_image, 'haar')
    _, (rhs_LH, rhs_HL, rhs_HH) = pywt.dwt2(rhs_image, 'haar')
    d1 = cv2.absdiff(lhs_LH, rhs_LH).sum()
    d2 = cv2.absdiff(lhs_HL, rhs_HL).sum()
    d3 = cv2.absdiff(lhs_HH, rhs_HH).sum()
    return d1 + d2 + d3
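
# A usage sketch for image_diff_dwt: two constant images score 0.0 because
# only the detail (high-frequency) bands are compared, never the brightness
# itself, which is the point of differencing in the wavelet domain.
lhs = np.zeros((64, 64), np.uint8)
rhs = np.full((64, 64), 255, np.uint8)
print(image_diff_dwt(lhs, rhs))  # 0.0 despite very different brightness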