def run(self):
    while True:
        # If the last frame had no motion, skip ahead by grabbing `scan`
        # frames without decoding them.
        if not self.scan == 0:
            if self.noMotion:
                for x in range(1, self.scan):
                    self.cap.grab()
                    self.frame_count = self.frame_count + 1

        # Capture frame from file
        ret, current_image = self.cap.read()
        if not ret:
            # If there are no more frames, break
            break

        # Cut off the bottom 5% if the plotwatcher option is called.
        if not self.plotwatcher:
            camera_image = current_image
        else:
            camera_image = current_image[1:700, 1:1280]

        # If an ROI is set, subset the image
        if not self.set_ROI:
            current_imageROI = camera_image
        else:
            if self.ROI_include == "include":
                current_imageROI = camera_image[
                    self.roi_selected[1]:self.roi_selected[3],
                    self.roi_selected[0]:self.roi_selected[2]]
            else:
                # Exclude the area by making it a white square
                current_imageROI = camera_image.copy()
                current_imageROI[
                    self.roi_selected[1]:self.roi_selected[3],
                    self.roi_selected[0]:self.roi_selected[2]] = 255

        self.frame_count += 1
        frame_t0 = time.time()

        # Print progress.
        # For some videos the total frame count cannot be read, so skip the
        # progress report in that case.
        if not self.total_frameC == 0.0:
            # This is a bit convoluted: because of scanning we might jump over
            # the exact frame that triggers the report, so test a window equal
            # to the scan size.
            countR = self.frame_count - np.arange(0, self.scan + 1)

            # Format the frame counts as percentages
            numbers = [
                round(x / float(self.total_frameC), 4) * 100 for x in countR
            ]

            # Is the percentage completed a multiple of 10?
            if any([x % 10 == 0 for x in numbers]):
                fc = float(self.frame_count) / self.total_frameC * 100
                # Pause feature so it doesn't announce twice within one scan
                # window; a bit ugly, but it doesn't run very often. If the
                # percent complete was printed within the scan range, skip it.
                if abs(self.frameC_announce - self.frame_count) >= self.scan:
                    print("%.0f %% completed" % fc)
                    print("%.0f candidate motion frames" % self.total_count)
                    # Reset the last frame at which progress was printed.
                    self.frameC_announce = self.frame_count

        # Adaptively set the aggregate threshold.
        # Set the floor flag; we can't have a negative accAvg.
        floor = 0
        if self.adapt:
            sourceM.adapt(frame_rate=self.frame_rate,
                          accAvg=self.accAvg,
                          file_destination=self.file_destination,
                          floorvalue=self.floorvalue,
                          frame_count=self.frame_count)

        #############################
        # BACKGROUND SUBTRACTION
        #############################
        grey_image = self.BC.BackGroundSub(current_imageROI)

        #######################################
        # Contour Analysis and Post-Processing
        #######################################
        points = []  # Was using this to hold either pixel coords or polygon coords.
        bounding_box_list = []
        contourImage = grey_image.copy()

        # Now calculate movements using the white pixels as "motion" data
        _, contours, hierarchy = cv2.findContours(contourImage, cv2.RETR_TREE,
                                                  cv2.CHAIN_APPROX_SIMPLE)

        if len(contours) == 0:
            # No movement; add to the counter and set the noMotion flag.
            self.nocountr = self.nocountr + 1
            self.noMotion = True
            continue

        for cnt in contours:
            bounding_rect = cv2.boundingRect(cnt)
            point1 = (bounding_rect[0], bounding_rect[1])
            point2 = (bounding_rect[0] + bounding_rect[2],
                      bounding_rect[1] + bounding_rect[3])
            bounding_box_list.append((point1, point2))

        # Find the average size of the bounding boxes (targets), then remove
        # any tiny boxes, which are probably just noise. "Tiny" is defined
        # here as any box with less than 30% of the area of the average box.
        # This reduces false positives from small "sparkle" noise.
        box_areas = []
        for box in bounding_box_list:
            box_width = box[self.right][0] - box[self.left][0]
            box_height = box[self.bottom][0] - box[self.top][0]
            box_areas.append(box_width * box_height)

        average_box_area = 0.0
        if len(box_areas):
            average_box_area = float(sum(box_areas)) / len(box_areas)

        trimmed_box_list = []
        for box in bounding_box_list:
            box_width = box[self.right][0] - box[self.left][0]
            box_height = box[self.bottom][0] - box[self.top][0]
            # Only keep the box if it's not a tiny noise box:
            if (box_width * box_height) > average_box_area * .3:
                trimmed_box_list.append(box)

        # Shapely does a much faster job of polygon union;
        # format the boxes into shapely bounding features.
        shape_list = []
        # Centroids of each target, and the target blobs themselves
        bound_center = []
        bound_casc_box = []
        grabCUTimage = camera_image.copy()

        for out in trimmed_box_list:
            sh_out = sg.box(out[0][0], out[0][1], out[1][0], out[1][1])
            shape_list.append(sh_out)

        casc = cascaded_union(shape_list).buffer(1)

        if casc.type == "MultiPolygon":
            # Draw the shapely bounds of each merged blob
            for p in range(len(casc.geoms)):
                b = casc.geoms[p].bounds
                if casc.geoms[p].area > ((self.width * self.height) *
                                         (float(self.minSIZE / 100))):
                    cv2.rectangle(camera_image, (int(b[0]), int(b[1])),
                                  (int(b[2]), int(b[3])), (0, 0, 255),
                                  thickness=2)
                    # Append the centroid to the list, rounded to two decimals
                    x = round(casc.geoms[p].centroid.coords.xy[0][0], 2)
                    y = round(casc.geoms[p].centroid.coords.xy[1][0], 2)
                    bound_center.append((x, y))
                    bound_casc_box.append(casc.geoms[p])
        else:
            b = casc.bounds
            # If the bounding polygon is larger than the minimum size, draw a rectangle
            if casc.area > ((self.width * self.height) *
                            (float(self.minSIZE / 100))):
                cv2.rectangle(camera_image, (int(b[0]), int(b[1])),
                              (int(b[2]), int(b[3])), (0, 0, 255), thickness=2)
                x = round(casc.centroid.coords.xy[0][0], 2)
                y = round(casc.centroid.coords.xy[1][0], 2)
                bound_center.append((x, y))
                bound_casc_box.append(casc)

        if len(bound_center) == 0:
            self.toosmall = self.toosmall + 1
            self.noMotion = True
            continue

        ##############################
        # Grabcut Image Segmentation
        ##############################
        if self.segment:
            # Get the bounding box of each blob, padded by a 100 px buffer
            for blob in bound_casc_box:
                b = blob.buffer(100).bounds
                rect = [int(x) for x in b]
                # Format into x, y, w, h; shapely box order differs from OpenCV
                rectf = tuple([rect[0], rect[1],
                               rect[2] - rect[0], rect[3] - rect[1]])

                mask = np.zeros(grabCUTimage.shape[:2], np.uint8)
                mask[grey_image == 0] = 0
                # Set the rectangle as probable background
                mask[rect[1]:rect[3], rect[0]:rect[2]] = 2
                # Add the background-subtracted pixels as foreground
                mask[grey_image == 255] = 1

                bgdModel = np.zeros((1, 65), np.float64)
                fgdModel = np.zeros((1, 65), np.float64)

                if not mask.sum() == 0:
                    cv2.grabCut(grabCUTimage, mask, rectf, bgdModel, fgdModel,
                                4, cv2.GC_INIT_WITH_MASK)
                    mask2 = np.where((mask == 2) | (mask == 0), 0,
                                     1).astype('uint8')
                    _, contours, hierarchy = cv2.findContours(
                        mask2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                    for cnt in contours:
                        bounding_rect = cv2.boundingRect(cnt)
                        point1 = (bounding_rect[0], bounding_rect[1])
                        point2 = (bounding_rect[0] + bounding_rect[2],
                                  bounding_rect[1] + bounding_rect[3])
                        cv2.rectangle(camera_image, point1, point2,
                                      (0, 255, 255), thickness=2)

        # Flag for detections inside the counting area
        inside_area = False
        if self.set_areacounter:
            # Test whether each detection centre falls inside the counting area
            for box in bound_center:
                # Is the x coordinate within the area box?
                if area_box[2] > box[0] > area_box[0]:
                    # Is the y coordinate within the area box?
                    if area_box[3] > box[1] > area_box[1]:
                        inside_area = not inside_area
            cv2.rectangle(camera_image, (area_box[0], area_box[1]),
                          (area_box[2], area_box[3]), (242, 221, 61),
                          thickness=1, lineType=4)

        ##################################################
        # Write image to file
        ##################################################
        if not self.makeVID == "none":
            if self.makeVID in ("frames", "both"):
                cv2.imwrite(self.file_destination + "/" +
                            str(self.frame_count) + ".jpg", camera_image)

        # Save the frame count and the time in the video, in case the user
        # wants to check the original. Create a time object; this relies on
        # the frame_rate being correct!
        sec = timedelta(seconds=int(self.frame_count / float(self.frame_rate)))
        d = datetime(1, 1, 1) + sec

        for target in bound_center:
            stampadd = (str("%d:%d:%d " % (d.hour, d.minute, d.second)),
                        int(self.frame_count), target[0], target[1])
            self.stamp.append(stampadd)

        # If inside the area and the counter is on, write the stamp to a separate file
        if self.set_areacounter & inside_area:
            for target in bound_center:
                stampadd = (str("%d:%d:%d " % (d.hour, d.minute, d.second)),
                            int(self.frame_count), target[0], target[1])
                self.areaC.append(stampadd)

        # Keep a counter of returned frames to balance the hit rate
        self.hitcounter = self.hitcounter + 1
        self.total_count = self.total_count + 1

        # Set flag to motion
        self.noMotion = False
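
# --- Illustrative sketch (not from the original source) ---------------------
# A minimal, standalone rendering of the box-merging step used in run() above:
# per-contour rectangles become shapely boxes, overlapping boxes are merged
# with cascaded_union, and only merged blobs above a minimum fraction of the
# frame area are kept. The helper name merge_boxes and the example values are
# hypothetical; the minSIZE semantics mirror the code above (percent of frame).
import shapely.geometry as sg
from shapely.ops import cascaded_union


def merge_boxes(bounding_box_list, width, height, minSIZE):
    """Merge overlapping (point1, point2) boxes and return blob centroids."""
    shapes = [sg.box(p1[0], p1[1], p2[0], p2[1])
              for p1, p2 in bounding_box_list]
    merged = cascaded_union(shapes).buffer(1)
    # cascaded_union returns a single Polygon or a MultiPolygon
    blobs = merged.geoms if merged.type == "MultiPolygon" else [merged]
    centers = []
    for blob in blobs:
        # Keep blobs covering more than minSIZE percent of the frame
        if blob.area > (width * height) * (minSIZE / 100.0):
            x, y = blob.centroid.coords.xy
            centers.append((round(x[0], 2), round(y[0], 2)))
    return centers

# Example: the first two boxes overlap and merge into one blob, the third
# stays separate, so two centroids are returned.
# merge_boxes([((10, 10), (50, 50)), ((40, 40), (90, 90)),
#              ((300, 300), (320, 330))], 1280, 720, 0.01)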
def run(self):
    # NOTE: this variant of run() works on locals (cap, frame_count, roi,
    # area_box, display_image, backgr, file_destination, total_frameC,
    # frameC_announce, total_count, hitcounter, noMotion) that are assumed to
    # be initialised earlier in the method; only the frame loop is shown here.
    while True:
        # Capture frame from file
        ret, camera_imageO = cap.read()
        if not ret:
            # Finalize the counters for reporting
            self.frame_count = frame_count
            self.file_destination = file_destination
            break

        # Cut off the bottom 5% if the plotwatcher option is called.
        if not self.plotwatcher:
            camera_image = camera_imageO.copy()
        else:
            camera_image = camera_imageO[1:700, 1:1280]

        # If an ROI is set, subset the image
        if not self.set_ROI:
            camera_imageROI = camera_image
        else:
            if self.ROI_include == "include":
                camera_imageROI = camera_image[roi[1]:roi[3], roi[0]:roi[2]]
            else:
                # Exclude the area by making it a white square
                camera_imageROI = camera_image.copy()
                camera_imageROI[roi[1]:roi[3], roi[0]:roi[2]] = 255

        frame_count += 1
        frame_t0 = time.time()

        # Print progress.
        # For some videos the total frame count cannot be read, so skip the
        # progress report in that case.
        if not total_frameC == 0.0:
            # This is a bit convoluted: because of scanning we might jump over
            # the exact frame that triggers the report, so test a window equal
            # to the scan size.
            countR = frame_count - np.arange(0, self.scan + 1)

            # Format the frame counts as percentages
            numbers = [round(x / float(total_frameC), 3) * 100 for x in countR]

            # Is the percentage completed a multiple of 10?
            if any([x % 10 == 0 for x in numbers]):
                fc = float(frame_count) / total_frameC * 100
                # Pause feature so it doesn't announce twice within one scan
                # window; a bit ugly, but it doesn't run very often. If the
                # percent complete was printed within the scan range, skip it.
                if abs(frameC_announce - frame_count) >= self.scan:
                    print("%.0f %% completed" % fc)
                    print("%.0f candidate motion frames" % total_count)
                    # Reset the last frame at which progress was printed.
                    frameC_announce = frame_count

        # Adaptively set the aggregate threshold; we know that about 95% of
        # the data are negatives.
        # Set the floor flag; we can't have a negative accAvg.
        floor = 0
        if self.adapt:
            sourceM.adapt(frame_rate=self.frame_rate,
                          accAvg=self.accAvg,
                          file_destination=file_destination,
                          floorvalue=self.floorvalue)

        #############################
        # BACKGROUND SUBTRACTION
        #############################
        grey_image = backgr.BackGroundSub(camera_imageROI)

        #############################
        # Contour filters
        #############################
        bound_center = backgr.contourFilter(grey_image)

        if len(bound_center) == 0:
            self.toosmall = self.toosmall + 1
            noMotion = True
            continue

        # Flag for detections inside the counting area
        inside_area = False
        if self.set_areacounter:
            # Test whether each detection centre falls inside the counting area
            for box in bound_center:
                # Do this the simple way for now:
                # is the x coordinate within the area box?
                if area_box[2] > box[0] > area_box[0]:
                    # Is the y coordinate within the area box?
                    if area_box[3] > box[1] > area_box[1]:
                        inside_area = not inside_area
            if self.ROI_include == "exclude":
                cv2.rectangle(camera_imageO, (area_box[0], area_box[1]),
                              (area_box[2], area_box[3]), (242, 221, 61),
                              thickness=1, lineType=4)
            else:
                cv2.rectangle(display_image, (area_box[0], area_box[1]),
                              (area_box[2], area_box[3]), (242, 221, 61),
                              thickness=1, lineType=4)

        ##################################################
        # Write image to file
        ##################################################
        if not self.makeVID == "none":
            if self.makeVID in ("frames", "both"):
                if self.ROI_include == "exclude":
                    cv2.imwrite(file_destination + "/" + str(frame_count) +
                                ".jpg", camera_imageO)
                else:
                    cv2.imwrite(file_destination + "/" + str(frame_count) +
                                ".jpg", display_image)

        # Save the frame count and the time in the video, in case the user
        # wants to check the original. Create a time object; this relies on
        # the frame_rate being correct!
        sec = timedelta(seconds=int(frame_count / float(self.frame_rate)))
        d = datetime(1, 1, 1) + sec

        for target in bound_center:
            stampadd = (str("%d:%d:%d " % (d.hour, d.minute, d.second)),
                        int(frame_count), target[0], target[1])
            self.stamp.append(stampadd)

        # If inside the area and the counter is on, write the stamp to a separate file
        if self.set_areacounter & inside_area:
            for target in bound_center:
                stampadd = (str("%d:%d:%d " % (d.hour, d.minute, d.second)),
                            int(frame_count), target[0], target[1])
                self.areaC.append(stampadd)

        ##################################################
        # Keep a counter of returned frames to balance the hit rate
        hitcounter = hitcounter + 1
        self.total_count = self.total_count + 1

        # Set flag to motion
        noMotion = False
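
# --- Illustrative sketch (not from the original source) ---------------------
# Both run() variants above turn the current frame count into an in-video
# timestamp by dividing by the frame rate and adding the resulting seconds to
# a dummy datetime. A standalone version, with a hypothetical helper name:
from datetime import datetime, timedelta


def frame_to_timestamp(frame_count, frame_rate):
    """Return an H:M:S string for a frame index, assuming a constant frame rate."""
    sec = timedelta(seconds=int(frame_count / float(frame_rate)))
    d = datetime(1, 1, 1) + sec
    return "%d:%d:%d" % (d.hour, d.minute, d.second)

# Example: frame 4500 at 30 fps is 150 seconds in, i.e. "0:2:30".
# frame_to_timestamp(4500, 30)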
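
# --- Illustrative sketch (not from the original source) ---------------------
# The --segment branch of the first run() variant above refines each blob with
# a mask-initialised grabCut: background-subtracted motion pixels are marked
# as sure foreground, the padded blob rectangle as probable background, and
# everything else as sure background. The helper name refine_with_grabcut and
# the rect convention (x1, y1, x2, y2) are hypothetical.
import numpy as np
import cv2


def refine_with_grabcut(frame, fg_mask, rect, iterations=4):
    """Return a binary foreground mask refined by grabCut within rect."""
    x1, y1, x2, y2 = rect
    mask = np.zeros(frame.shape[:2], np.uint8)   # sure background everywhere
    mask[y1:y2, x1:x2] = cv2.GC_PR_BGD           # probable background inside the rectangle
    mask[fg_mask == 255] = cv2.GC_FGD            # motion pixels as sure foreground
    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)
    if mask.sum() == 0:
        # Nothing labelled foreground or probable background; skip grabCut
        return mask
    rectf = (x1, y1, x2 - x1, y2 - y1)
    cv2.grabCut(frame, mask, rectf, bgdModel, fgdModel, iterations,
                cv2.GC_INIT_WITH_MASK)
    # Collapse grabCut's four labels into a 0/1 foreground mask
    return np.where((mask == cv2.GC_BGD) | (mask == cv2.GC_PR_BGD),
                    0, 1).astype('uint8')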