Example #1
	def __init__(self):
		# make instances of a background subtractor to find the climber.
		d_hist = 100
		self.d_rate = 1.0/d_hist

		self.fgbg_depth = cv2.createBackgroundSubtractorMOG2(history = d_hist, varThreshold=1)
		self.fgbg_depth.setBackgroundRatio(.1)
		self.fgbg_depth.setNMixtures(10)
		self.fgbg_depth.setVarThreshold(24)
		
		bgr_hist = 500
		self.bgr_rate = 1.0/bgr_hist
		self.fgbg_bgr = cv2.createBackgroundSubtractorMOG2(history = bgr_hist)
		self.shadowVal = self.fgbg_bgr.getShadowValue()
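Example #1 only builds the subtractors and stores 1/history learning rates; a minimal sketch of how such a rate is typically passed to MOG2's apply() is shown below (the synthetic frame is a stand-in for illustration, not part of the original class).

import cv2
import numpy as np

# Sketch: a learning rate of 1/history matches the subtractor's default adaptation speed.
fgbg = cv2.createBackgroundSubtractorMOG2(history=100, varThreshold=16)
rate = 1.0 / 100  # same idea as self.d_rate above

frame = np.zeros((120, 160, 3), dtype=np.uint8)    # placeholder frame
mask = fgbg.apply(frame, learningRate=rate)        # 0 freezes the model, -1 selects the automatic rate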
def scene_change(input_file):
	cap = cv2.VideoCapture(input_file)
	video_capture = cap
	cv2.ocl.setUseOpenCL(False)
	fgbg = cv2.createBackgroundSubtractorMOG2()
	frame_num = 0
	last_detected = -20
	scene_num = 0
	while(1):
		frame_num = frame_num + 1
		ret, frame = video_capture.read()
		if not ret:
			break
		fgmask = fgbg.apply(frame)
		num_white = 0
		flag = 0
		if(frame_num-last_detected>40):
			last_detected = frame_num
			for i in range(fgmask.shape[0]):
				for j in range(fgmask.shape[1]):
					if fgmask[i][j] == 255:
						num_white = num_white+1
						if(num_white>0.8*fgmask.shape[0]*fgmask.shape[1]):
							scene_num = scene_num + 1
							print("Scene changed : ", scene_num)
							flag = 1
							break
				if flag == 1:
					break
		cv2.imshow('Mask',fgmask)
		cv2.imshow('Video',frame)
		k = cv2.waitKey(30) & 0xff
		if k == 27:
			break
	cap.release()
	cv2.destroyAllWindows()
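The nested per-pixel loop above is very slow in Python; below is a vectorized sketch of the same 80%-coverage test using NumPy (not the original code).

import numpy as np

def is_scene_change(fgmask, ratio=0.8):
    # count foreground pixels and flag a scene change when they cover more than `ratio` of the mask
    num_white = np.count_nonzero(fgmask == 255)
    return num_white > ratio * fgmask.shape[0] * fgmask.shape[1]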
Example #3
def VideoFrameReaders(VideoDirectory):
    cap = cv2.VideoCapture(VideoDirectory)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    fgbg = cv2.createBackgroundSubtractorMOG2()
    timestamp = []
    count = 0
    try:
        while cap.isOpened():
            ret,frame = cap.read()
            time = cap.get(0) # current position of the video in milliseconds (CAP_PROP_POS_MSEC)
            timestamp.append(time)

            print(timestamp)

            if frame is None:
                break
           # frame = cv2.cvtColor(frame,cv2.COLOR_RGB2GRAY)
            image = frame.reshape((frame.shape[0]*frame.shape[1],3))
            K = 4
            clf = MiniBatchKMeans(K)

            #predict cluster labels and quanitize each color based on the labels

            cls_labels = clf.fit_predict(image)
            print(cls_labels)
            cls_quant = clf.cluster_centers_.astype("uint8")[cls_labels]


    except EOFError:
        pass
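Example #3 stops right after predicting the cluster labels. Below is a sketch of the colour-quantization step it appears to be heading towards, mapping each pixel to its cluster centre and reshaping back to image form (an assumed continuation, not the original code).

import numpy as np
from sklearn.cluster import MiniBatchKMeans

def quantize_colors(frame, k=4):
    # flatten the frame to a list of pixels, cluster them, then rebuild the quantized image
    h, w = frame.shape[:2]
    pixels = frame.reshape((h * w, 3))
    clf = MiniBatchKMeans(n_clusters=k)
    labels = clf.fit_predict(pixels)
    quant = clf.cluster_centers_.astype("uint8")[labels]
    return quant.reshape((h, w, 3))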
Example #4
def MOG(cap):
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    print('kernel', kernel)
    # fgbg = cv2.createBackgroundSubtractorMOG()  # seems to be missing
    fgbg = cv2.createBackgroundSubtractorMOG2()
    # fgbg.setDetectShadows(False)

    frame_save = 0

    while True:
        ret, frame = cap.read()
        if not ret:
            break
        # find the change
        fgmask = fgbg.apply(frame)
        # clean up the image
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)

        frame_save += 1
        if 100 < frame_save <105:
            cv2.imwrite('frame{}.png'.format(frame_save), frame)

        cv2.imshow('frame',fgmask)
        k = cv2.waitKey(10)
        if k == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
Example #5
def segmentar(nombre):
	cap = cv2.VideoCapture(nombre)

	fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=True)
	i=0
	roi_index = 0
	levels=1
	while True:

		ret, frame = cap.read() # Capture frame-by-frame
		if ret == False:
			break
		#gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # convert frame to grayscale
		fgmask = fgbg.apply(frame) # detect the moving foreground
		retval, thresh = cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY) # remove shadows
		enmask = cv2.cvtColor(thresh, cv2.COLOR_GRAY2RGB) # triplicate the channels so the mask can be combined with an RGB frame
		enmask = cv2.bitwise_and(frame,enmask) # mask the original frame

		cv2.imshow('frame',frame)
		cv2.imshow('fgmask',fgmask)
		#cv2.imshow('thresh',thresh)
		#cv2.imshow('enmask',enmask)
		if cv2.waitKey(20) & 0xFF == ord('q'):
			# When everything done, release the capture
			break
		#print 'frame:',i, ' rate: ', 1. / t1
		i=i+1

	cap.release()
	cv2.destroyAllWindows()
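The 200 threshold in Example #5 works because MOG2 with detectShadows=True marks shadow pixels with a mid-gray shadow value (127 by default) and real foreground with 255. A tiny standalone sketch, assuming the default settings:

import cv2
import numpy as np

fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=True)
shadow_val = fgbg.getShadowValue()                          # 127 unless changed
fgmask = np.array([[0, shadow_val, 255]], dtype=np.uint8)   # toy mask: background, shadow, foreground
_, no_shadows = cv2.threshold(fgmask, shadow_val, 255, cv2.THRESH_BINARY)
print(no_shadows)                                           # only the 255 pixel survives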
Example #6
    def __init__(self, threshold = 0.50, num_frames_post_scene = 30,
                 kernel_size = -1):
        """Initializes motion-based scene detector object."""
        # Requires porting to v0.5 API.
        raise NotImplementedError()

        self.threshold = float(threshold)
        self.num_frames_post_scene = int(num_frames_post_scene)

        self.kernel_size = int(kernel_size)
        if self.kernel_size < 0:
            # Set kernel size when process_frame first runs based on
            # video resolution (480p = 3x3, 720p = 5x5, 1080p = 7x7).
            pass

        self.bg_subtractor = cv2.createBackgroundSubtractorMOG2( 
            detectShadows = False )

        self.last_frame_score = 0.0

        self.in_motion_event = False
        self.first_motion_frame_index = -1
        self.last_motion_frame_index = -1
        self.cli_name = 'detect-motion'
        return
    def generateBackground(self):
        minFrame = self.videoPlaybackWidget.getMinRange()
        maxFrame = self.videoPlaybackWidget.getMaxRange()
        stride = self.strideSpinBox.value()
        numFrames = int((maxFrame-minFrame)/stride)
        progress = QProgressDialog("Generating background...", "Abort", 0, numFrames, self)

        progress.setWindowModality(Qt.WindowModal)

        fgbg = cv2.createBackgroundSubtractorMOG2()
        currentFrameNo = self.videoPlaybackWidget.currentFrameNo
        for i, frameNo in enumerate(range(minFrame, maxFrame+1, stride)):
            progress.setValue(i)
            if progress.wasCanceled():
                break

            ret, frame = self.videoPlaybackWidget.readFrame(frameNo)
            fgbg.apply(frame)

        if not progress.wasCanceled():
            self.fgbg = fgbg
            self.setFrame(self.cv_img)

        progress.setValue(numFrames)

        self.videoPlaybackWidget.currentFrameNo = currentFrameNo
Example #8
 def __init__(self, features=150, gauss_blur=3, med_blur=5, history=1, clip_limit=100.0, tile_grid=(8, 8)):
     self.num_features = features  # use the constructor argument rather than a hard-coded count
     self.orb = cv2.ORB_create(features)
     self.bsb = cv2.createBackgroundSubtractorMOG2(history)
     self.clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=tile_grid)
     self.gaussian_blur_factor = gauss_blur
     self.median_blur_factor = med_blur
Example #9
    def __init__(self, vmanager, learn_bg=True):
        """
        Args:
            vmanager: VManager
                Used to get references to BoardFinder and Controller, as well as image queue, and frame reading.
            learn_bg: bool
                Set to True to create and maintain a background model, which enables self.get_foreground().

        """
        super().__init__(vmanager)
        self.goban_img = None
        self.canonical_shape = (cvconf.canonical_size, cvconf.canonical_size)
        self._posgrid = PosGrid(cvconf.canonical_size)
        self.mask_cache = None
        self.zone_area = None
        self.intersections = None

        # background-related attributes
        if learn_bg:
            self.bg_model = cv2.createBackgroundSubtractorMOG2(detectShadows=False)
            self.bg_init_frames = 50 if not is_img(self.vmanager.current_video) else 0

        # (quite primal) "learning" attributes. see self._learn()
        self.corrections = queue.Queue(correc_size)
        self.saved_bg = np.zeros(self.canonical_shape + (3,), dtype=np.float32)
        self.deleted = {}
        self.nb_del_samples = 50
Example #10
 def update(self):
     while not self.camera.isReady():
         pass  # busy-wait until the camera is ready
     self.ready = True
     wait = 5
     current  = None
     self.first = True
     fgbg = cv2.createBackgroundSubtractorMOG2()
     while(self.run):
         if(self.effects):
             if(self.effect == 1):
                 self.motionTrackOrig()
             elif(self.effect == 2):
                 self.motionTrackBlack()
             elif(self.effect == 3):
                 self.motionTrackBG()
             elif(self.effect == 4):
                 self.motionTrackOutline()
              # #mask#masked_data#diff#masked_data #cv2.cvtColor(frame,cv2.COLOR_GRAY2RGB)
         else: #Just pass the image as is
             self.first = True
             frame = self.camera.read()
             #print(wait)
             if wait > 0:
                 wait = wait - 1
             else:
                 #print("recolor")
                 frame = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
             frame = np.rot90(frame)
             #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
             self.frame = frame
Example #11
def play(dev=0):
    cap = cv.VideoCapture(dev)
    # Background subtractor with history = 500, varThreshold = 16 and
    # detectShadows disabled (the third positional argument is False)
    bgsub = cv.createBackgroundSubtractorMOG2(500, 16, False)
    key = 0
    pause = False
    list_frames = []
    img_index = 0

    while(True):
        key = cv.waitKey(1) & 0xFF
        ret, frame = cap.read()

        bgs = bgsub.apply(frame)
        bgs_count = sum(sum(bgs))

        if bgs_count > MAX_BGS:
            list_frames.append(frame)

        if len(list_frames) > MAX_FRAMES:
            cv.imwrite(path + 'capt' + str(img_index) + '.png', cv.flip(frame, 1))
            img_index+=1
            list_frames = []

        cv.imshow('frame', cv.flip(bgs, 1))

        if key == 27:
            break
        if key == 32:
            pause = not pause
        if pause:
            continue

    cv.destroyAllWindows()
Example #12
 def __init__(self, bgsample,  min_area = 2000, min_width = None, min_height = None):
     self.bgs = cv2.createBackgroundSubtractorMOG2(100, 8, False)
     self.bgmask = self.bgs.apply(bgsample)
     self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
     self.min_area = min_area
     self.min_width = min_width
     self.min_height = min_height
Example #13
def videoframereaders(videodirectory):

    cap = cv2.VideoCapture(videodirectory)
    # define a kernel and subtract the background
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    fgbg = cv2.createBackgroundSubtractorMOG2()
    timestamp = []
    count = 0
    try:
        while cap.isOpened():
            ret,frame = cap.read()
            time = cap.get(0)
            timestamp.append(time)
            print(timestamp)

            if frame is None:
                break
            image = frame
            fgmask = fgbg.apply(image)
            fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)

            cv2.imshow('frame', fgmask)

            #take the image and perform pyramid mean shift filtering to aid the thresholding step
            fgmask = cv2.cvtColor(fgmask,cv2.COLOR_GRAY2BGR)
            shifted = cv2.pyrMeanShiftFiltering(fgmask, 10, 10)
            print(shifted)
            cv2.imshow("Input", image)
            gray = cv2.cvtColor(shifted, cv2.COLOR_BGR2GRAY)
            thresh = cv2.threshold(gray, 0, 255,
                cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
            cv2.imshow("Thresh", thresh)
            L = measure.label(thresh)
            print "Number of components:", np.max(L)

            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
            print("[INFO] {} unique contours found".format(len(cnts)))

            # loop over the contours
            for (i, c) in enumerate(cnts):
                # draw the contour
                ((x, y), _) = cv2.minEnclosingCircle(c)
                #cv2.putText(image, "*{}".format(i + 1), (int(x) - 10, int(y)),
                 #   cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 0)
                cv2.drawContours(image, [c], -1, (0, 255, 0), 1)

                # show the output image
                cv2.imshow("Contour", image)

                #cv2.imshow('window-name',image)
                # cv2.imwrite("/home/sami/Desktop/movies/extractFrames/frame%d.jpg" % count, image)
            count = count + 1
            sleep(5)
            if cv2.waitKey(10) & 0xFF == ord('q'):
                break


    except EOFError:
        pass
    return count,timestamp,
Example #14
def playVideo(filename):
    frameNum = 0
    cap = cv2.VideoCapture('./Image/'+filename)
    fgbg = cv2.createBackgroundSubtractorMOG2()
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))

    # fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # out = cv2.VideoWriter('output.avi',fourcc,20.0,(1280,720))


    while (1):
        frameNum += 1
        ret, frame = cap.read()

        fgmask = fgbg.apply(frame)
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        rawdata = cv2.cvtColor(fgmask,cv2.COLOR_GRAY2BGR) # gray values

        # out.write(frame)

        cv2.imshow('frame',fgmask)
        k = cv2.waitKey(1) & 0xff
        if k == 27:  # ESC to quit
            break
        if frameNum > FRAMEMAX:
            break
    cap.release()
    cv2.destroyAllWindows()
Example #15
def bgSubtraction(input, v, debug):
        if settingsCon['bgHistoryOld']!=settingsCon['bgHistory'] or settingsCon['bgTreshOld']!=settingsCon['bgTresh']:
                h=int(settingsCon['bgHistory'])
                t=int(settingsCon['bgTresh'])
                #print h," --- ",t
                settingsCon['objBackS'] = cv2.createBackgroundSubtractorMOG2(h, t, 0)
                settingsCon['bgHistoryOld']=settingsCon['bgHistory']
                settingsCon['bgTreshOld']=settingsCon['bgTresh']
        try:
                fgmask = settingsCon['objBackS'].apply(input)
                #image = cv2.GaussianBlur(fgmask,(5,5),3)
                image=cv2.dilate(fgmask, None, 10)
                image=cv2.erode(image, None, 10)
                _, cnts, _ = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)#cv2.RETR_EXTERNAL
                max_area = 200
                minv=(0,0)
                for c in cnts:
                # if the contour is too small, ignore it
                        if settingsCon['minArea']<cv2.contourArea(c)<settingsCon['maxArea']:                                 
                                # compute the bounding box for the contour, draw it on the frame,
                                # and update the text
                                
                                (x, y, w, h) = cv2.boundingRect(c)
                                cv2.rectangle(input, (x, y), (x + w, y + h), (0, 255, 0), 2)
                                cv2.circle(input,(x + w//2, y + h//2),2,(0, 0, 255),-1)
                                if y+h>minv[1]:
                                        minv=(x + w//2, y + h)

                if debug==1:
                        cv2.imshow('Mask', fgmask)
                return (input,minv)
                
        except:
                #print 'Image grab failed.'
                return (input,(0,0))
Example #16
def trainBackgroundSubstractorMOG(images):
    bgs = cv2.createBackgroundSubtractorMOG2()
    for i in range(TRAIN_SIZE):
        idx = random.randrange(len(images))
        frame = cv2.imread(images[idx])
        bgs.apply(frame)
    return bgs
Example #17
 def _initialize_substractor(self):
     self.fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=True)
     self.fgbg.setHistory(self.history)
     self.fgbg.setShadowThreshold(self.shad_tresh)
     self.fgbg.setVarThreshold(self.var_tresh)
     # self.fgbg.setVarMax(self.var_max)
     # self.fgbg.setVarMin(self.var_min)
     return self.fgbg
Example #18
 def __init__(self,  min_area = 2000, min_width = None, min_height = None):
     self.f = 0.5
     self.frame_count = 0
     self.bgs = cv2.createBackgroundSubtractorMOG2(500, 100, detectShadows=False)
     self.min_area = min_area
     self.min_width = min_width
     self.min_height = min_height
     self.learning_rate = -1
Example #19
def MOG2init(history, T, nMixtures):
    # create an instance of MoG and setting up its history length
    fgbg = cv2.createBackgroundSubtractorMOG2(history)
    # setting up the portion of the background model
    fgbg.setBackgroundRatio(T)
    # setting up the number of MoG
    fgbg.setNMixtures(nMixtures)
    return fgbg
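A hypothetical usage of MOG2init above; the history, ratio, and mixture values are illustrative only.

import cv2

fgbg = MOG2init(history=200, T=0.9, nMixtures=5)
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
if ret:
    fgmask = fgbg.apply(frame)  # foreground mask for the first frame
cap.release()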
 def __init__(self):
     """Initialize variables used by Detectors class
     Args:
         None
     Return:
         None
     """
     self.fgbg = cv2.createBackgroundSubtractorMOG2()
Example #21
def IdHullTwo(frame1,frame2):
    # THIS DOESN'T WORK YET
    fgbg = cv2.createBackgroundSubtractorMOG2()
    fgmask = fgbg.apply(frame1)    
    hullPolygon = 0
    frameAbsDiff = 0
    
    return hullPolygon, frameAbsDiff
Example #22
	def __init__(this, **kargs):
		"""Constructor"""
		
		# Store the camera in a set
		Camera.CAMERAS.add(this)
		
		# Catch-all object
		this.PYON = pyon()
		
		# Camera capture object
		this._CAP = None
		
		# Extra hook function
		this.onFrameGet = lambda x: x
		
		# Camera coordinates
		this._POS = D2Point()
		
		# Reference-frame coordinates
		this._SPACE = pyon(unknown=2)
		
		# Camera angles (w:x / h:y)
		# IN RADIANS!
		this._FOV = D2Point()
		
		# Image band + vertical resolution
		this._BAND = D2Point(0, 1)
		this._RES = 1 # (step between lines)
		
		# Last captured frame
		this._FRAME = Empty()
		
		# Binary image produced by the detections
		this._BINARY = Empty(channels=1)
		
		# Last computed scan image
		this._SCAN = Empty()
		
		# Last detection
		this._DETECTED = None
		
		# Click state
		this._BOTTOM = False
		
		# Reference image (not necessarily used, depending on the algorithm)
		this._REF = Empty()
		
		# Blur kernel
		this._KERNEL = None
		
		# Background subtraction object
		this._MOG2 = cv2.createBackgroundSubtractorMOG2(detectShadows=False, **kargs)
		
		# Background-subtraction mask image
		this._FGMASK = Empty(channels=1)
		
		# Denoising kernel
		this._ANOISEK = None
def motionDetecter(blur_to_motiondetector_blurred_Queue, file_Queue):
    # Creating MOG object
    #fgbg = cv2.BackgroundSubtractorMOG()
    fgbg = cv2.createBackgroundSubtractorMOG2()

    lastMeanCenter = [-1, -1]

    # Start infinite loop here
    while True:
        motionFlag = 0
        FRACTIONS = list()
        FOREGROUND = list()
        CENTERS = list()
        
        # Receiving FRAMES
        filename, BLURS = blur_to_motiondetector_blurred_Queue.get()

        t1 = time.time()
        while len(BLURS) > 0:
            blurred = BLURS.pop(0)
            edges = cv2.Canny(blurred, 160, 200)
            CENTERS.append(getCenterOfMass(edges))
            fgmask = fgbg.apply(blurred)
            ret, frac = getMotionFromFrame(fgmask)
            motionFlag += ret
            FRACTIONS.append(frac)
        
        del BLURS
        # Getting max foreground percent for every 10 frames
        for i in range(VIDEO_LENGTH):
            FOREGROUND.append(max(FRACTIONS[FPS*i:FPS*(i+1)]))

        meanCenters = getMeanCenters(lastMeanCenter, CENTERS)
        lastMeanCenter = meanCenters[-1]
        motionList = getMotionFromCenters(meanCenters)

        # Writing output to file
        # remove the 'blurrer_' from the filename
        with open(filename[8:-4]+'.motion', 'w') as f:
            f.write(str(motionFlag) + '\n')
            f.write(str(FOREGROUND))
            f.write(str(motionList) + '\n')

        # Deleteing temporary used by Blurrer
        os.remove(filename)
        print "Processed MOG and Center of Mass", time.time() - t1

        # upload video and metadata to AWS if motion detected
        if motionFlag > 0 and max(motionList) > 5:
            file_Queue.put((filename, FOREGROUND))
        # delete the video and motion files otherwise
        else:
            os.remove(filename[8:-4]+'.mp4')
            os.remove(filename[8:-4]+'.motion')
                
    return
Example #24
def watershed_seg(frame):
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    fgbg = cv2.createBackgroundSubtractorMOG2()
    contors = []
    try:
        image = frame
        fgmask = fgbg.apply(image)
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)

        fgmask = cv2.cvtColor(fgmask,cv2.COLOR_GRAY2BGR)
        # pre-process the image before performing watershed
        # load the image and perform pyramid mean shift filtering to aid the thresholding step
        shifted = cv2.pyrMeanShiftFiltering(image, 10, 39)
        gray = cv2.cvtColor(shifted, cv2.COLOR_BGR2GRAY)
        thresh = cv2.threshold(gray, 0, 255,
             cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
        D = ndimage.distance_transform_edt(thresh)
        localMax = peak_local_max(D, indices=False, min_distance=10,
         labels=thresh)

        # # perform a connected component analysis on the local peaks,
        # # using 8-connectivity, then apply the Watershed algorithm
        markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
        labels = watershed(-D, markers, mask=thresh)

        print("[INFO] {} unique contour found".format(len(np.unique(labels)) - 1))

        #  loop over the unique labels returned by the Watershed
        # algorithm for finding the centriod cx and cy
        # of each contour Cx=M10/M00 and Cy=M01/M00.

        mask = np.zeros(gray.shape, dtype="uint8")
        for label in np.unique(labels):
             # if the label is zero, we are examining the 'background'
              #so simply ignore it
             if label == 0:
                 continue

            #otherwise, allocate memory for the label region and draw
             # it on the mask
             mask[labels == label] = 255
             cv2.imshow('masked', mask)
             # detect contours in the mask and grab the largest one
             cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                 cv2.CHAIN_APPROX_SIMPLE)[-2]
             cnt = cnts[0]
             #areas = [cv2.contourArea(c) for c in cnts]
             #max_index = np.argmax(areas)
             #cnt = cnts[max_index]
             contors.append(cnt)
            # if cv2.waitKey(10) & 0xFF == ord('q'):
             #   break


    except EOFError:
        pass
    return contors
Example #25
def background_substraction(filepath):
    #fgbg = cv2.createBackgroundSubtractorMOG()
    fgbg = cv2.createBackgroundSubtractorMOG2()
    # training with frames
    vid = imageio.get_reader(filepath)
    for i in range(100):
        fgmask = fgbg.apply(vid.get_data(i))
    cv2.imshow('frame',fgmask)
    return 
Example #26
 def run(self):
     cap = cv2.VideoCapture(self.inname)
     fgbg = cv2.createBackgroundSubtractorMOG2(varThreshold=80,detectShadows=False)
     while(1):
         ret, frame = cap.read()
         if not ret:
             break
         fgmask = fgbg.apply(frame)
         self.accumulator=self.accumulator+fgmask
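Example #26 adds each mask into self.accumulator. Below is a standalone sketch of the same accumulation, assuming a float accumulator so repeated uint8 additions do not wrap around; the file name is a placeholder.

import cv2
import numpy as np

cap = cv2.VideoCapture("input.avi")
fgbg = cv2.createBackgroundSubtractorMOG2(varThreshold=80, detectShadows=False)
accumulator = None
while True:
    ret, frame = cap.read()
    if not ret:
        break
    fgmask = fgbg.apply(frame)
    if accumulator is None:
        accumulator = np.zeros(fgmask.shape, dtype=np.float64)  # float avoids 8-bit overflow
    accumulator += fgmask
cap.release()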
Example #27
    def run(self):
        first = True
        self.run_itr = self.run_itr + 1
        print "Run iterantion is ", self.run_itr, " and last while itr was ", self.while_iteration
        self.while_iteration = 0
        self.fgbg = cv2.createBackgroundSubtractorMOG2()

        while True:
            self.while_iteration = self.while_iteration + 1
            #            print "------>While Iteration is ", self.iteration
            color_image, first = self.image_difference(first, 2)
            contour = self.add_contour_in_storage()
            # font = cv2.InitFont(cv2.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 1, 1)
            cv2.line(
                color_image,
                (0, self.frame_height // 2),
                (self.frame_width, self.frame_height // 2),
                color=(250, 0, 0),
                thickness=1,
            )
            cv2.putText(
                color_image,
                "In(%d)" % tracking.up,
                (0, self.frame_height // 2 - 5),
                cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=1.0,
                color=(0, 0, 250),
            )
            cv2.putText(
                color_image,
                "Out(%d)" % tracking.down,
                (0, self.frame_height // 2 + 25),
                cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=1.0,
                color=(0, 0, 250),
            )
            #            print "len of points    is ", len(self.list_of_points)
            for cnt in contour:  # while contour:
                bound_rect = cv2.boundingRect(cnt)
                # contour = contour.h_next()
                pt1, pt2, point, area = self.get_rectangle_parameters(bound_rect, color_image)
                cv2.rectangle(color_image, pt1, pt2, (255, 0, 0), 1)
                self.get_points_tracking(point, area, color_image)
            #                tracking = Tracking()
            #                global tracking_dict
            #                tracking_dict = tracking.__dict__
            #                for point in self.list_of_points:
            #                    tracking.add_points_to_tracks(point)
            #            cv2.circle(color_image, (278,10), radius=5, color=(255, 255, 0), thickness=6)
            cv2.imshow("myOutput", color_image)

            c = cv2.waitKey(1) % 0x100
            if c == 27:
                break

        cv2.destroyAllWindows()
Example #28
 def Button_open_cam(self):
     if self.cam_on==1:
         return
     width, height = 640, 480
     self.cap = cv2.VideoCapture(0)
     self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
     self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
     self.cam_on=1
     self.fgbg = cv2.createBackgroundSubtractorMOG2(history=100)
     self.show_frame()
Example #29
def GMM_zivkovic_list(frame_list ):
    
    fgbg = cv2.createBackgroundSubtractorMOG2()
    
    processed_frames = []
    for frame in frame_list:
        fgmask = fgbg.apply(frame)
        processed_frames.append(fgmask)
        
    
    return processed_frames
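GMM_zivkovic_list expects the frames already collected in a list. A hypothetical usage sketch follows; the file name is a placeholder.

import cv2

cap = cv2.VideoCapture("clip.avi")
frames = []
while True:
    ret, frame = cap.read()
    if not ret:
        break
    frames.append(frame)
cap.release()

masks = GMM_zivkovic_list(frames)
print(len(masks), "foreground masks computed")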
Example #30
    def go_config(self, config_path=None):

        # load config
        config = configparser.ConfigParser()
        config.read(config_path)

        # remote host settings
        self.endpoint = config.get('host', 'endpoint', fallback=None)

        # platform
        self.pi = config.getboolean('platform', 'pi')
        self.to_stdout = config.getboolean('platform', 'to_stdout')
        self.show_window = config.getboolean('platform', 'show_window')
        self.save_first_frame = config.getboolean('platform', 'save_first_frame')
        self.quit_after_first_frame = config.getboolean('platform', 'quit_after_first_frame')

        # video source settings
        self.crop_x1 = config.getint('video_source', 'frame_x1')
        self.crop_y1 = config.getint('video_source', 'frame_y1')
        self.crop_x2 = config.getint('video_source', 'frame_x2')
        self.crop_y2 = config.getint('video_source', 'frame_y2')
        self.max_width = config.getint('video_source', 'max_width')
        self.b_and_w = config.getboolean('video_source', 'b_and_w')

        # hog settings
        self.hog_win_stride = config.getint('hog', 'win_stride')
        self.hog_padding = config.getint('hog', 'padding')
        self.hog_scale = config.getfloat('hog', 'scale')

        # mog settings
        self.mog_enabled = config.getboolean('mog', 'enabled')
        if self.mog_enabled:
            self.mogbg = cv2.createBackgroundSubtractorMOG2()

        # setup lines
        lines = []
        total_lines = config.getint('triplines', 'total_lines')

        for idx in range(total_lines):
            key = 'line%d' % (idx + 1)
            start = eval(config.get('triplines', '%s_start' % key))
            end = eval(config.get('triplines', '%s_end' % key))
            buffer = config.getint('triplines', '%s_buffer' % key, fallback=10)
            direction_1 = config.get('triplines', '%s_direction_1' % key, fallback='Up')
            direction_2 = config.get('triplines', '%s_direction_2' % key, fallback='Down')
            line = Tripline(point_1=start, point_2=end, buffer_size=buffer, direction_1=direction_1,
                            direction_2=direction_2)
            lines.append(line)

        self.lines = lines
        self.source = config.get('video_source', 'source')
        self.people_options = dict(config.items('person'))

        self.go()
Example #31
# This script is meant to provide a background-subtraction example; the output image paths and frame counts should be adjusted as needed
# 0 handshake  1 hug  2 kick  4 hit  5 push
labels = ['handshake', 'hug', 'kick', '##', 'hit', 'push']
v_list = os.listdir('./video')
print(v_list)
j = 0
for line in v_list:
    fname = './video/' + line
    print(fname)
    cap = cv2.VideoCapture(fname)
    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    phase = int((length - 1) / 20)
    print(length, phase)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=True)
    i = 1
    step = 0
    while (1):
        i += 1
        # keep the footage of the two people standing
        ret, frame = cap.read()
        if step == 20:
            break
        # original image
        fgmask = fgbg.apply(frame)
        if i % phase != 0:
            continue
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        # # mirror flip
        # mirror_frame = cv2.flip(frame, 1)
Example #32
import cv2 as cv

cap = cv.VideoCapture('photo_video/vtest.avi')

fgbg = cv.createBackgroundSubtractorMOG2(detectShadows=False)

while True:
    ret, frame = cap.read()
    if frame is None:
        break
    fgmask = fgbg.apply(frame)

    cv.imshow('Frame', frame)
    cv.imshow('FG MASK Frame', fgmask)

    keyboard = cv.waitKey(30)
    if keyboard == ord('q') or keyboard == 27:
        print(fgbg.getHistory())
        break
cap.release()
cv.destroyAllWindows()
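Example #32 queries getHistory() on exit; MOG2 exposes matching getter/setter pairs for its main parameters, several of which appear in other examples on this page. A small standalone sketch:

import cv2

fgbg = cv2.createBackgroundSubtractorMOG2()
print(fgbg.getHistory(), fgbg.getVarThreshold(), fgbg.getNMixtures())
fgbg.setHistory(200)        # frames used to build the background model
fgbg.setVarThreshold(25)    # squared Mahalanobis distance for the foreground test
fgbg.setNMixtures(5)        # Gaussian components per pixel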
        cv2.imshow('Thresh', thresh)

        # Making the contours
        thresh1 = copy.deepcopy(thresh)
        contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)
        length = len(contours)
        maxArea = -1
        if length > 0:
            for i in range(length):  # Gets the biggest contour
                temp = contours[i]
                area = cv2.contourArea(temp)
                if area > maxArea:
                    maxArea = area
                    ci = i

            res = contours[ci]
            hull = cv2.convexHull(res)
            contour = np.zeros(img.shape, np.uint8)
            cv2.drawContours(contour, [res], 0, (0, 255, 0), 2)
            cv2.drawContours(contour, [hull], 0, (0, 0, 255), 3)

        cv2.imshow('Hand Detection', contour)

    # Keyboard OP
    k = cv2.waitKey(10)
    if k == 27:  # press ESC to exit
        break
    elif k == 32:  # press SPACE to capture the background
        bgModel = cv2.createBackgroundSubtractorMOG2(0, bgSubThreshold)
        isBgCaptured = 1
 def __init__(self):
     self.fgbg = cv2.createBackgroundSubtractorMOG2(history=12011,
                                                    varThreshold=10,
                                                    detectShadows=False)
Example #35
                else:
                    #orig=(int(pos_prev[0]*1000/xlimit),int(pos_prev[1]*1000/ylimit))
                    end = (int(pos_next[0] * 1000 / newx),
                           int(pos_next[1] * 1000 / newy))
                    orig = (int(pos_next[0] * 1000 / newx),
                            int(pos_next[1] * 1000 / newy))
                    pygame.draw.line(realdraw, (0, 0, 0), end, end, 7)
                    pos_prev = pos_next
        #print(pos_prev)
        #cv2.imshow('output', drawing)
        cv2.circle(frame, pos_prev, 3, (255, 0, 0), 10)
        cv2.imshow('photo', frame)
        cv2.imshow('mask', img)
    # Keyboard OP
    k = cv2.waitKey(10)
    if k == 27:  # press ESC to exit
        camera.release()
        cv2.destroyAllWindows()
        break
    elif k == ord('b'):  # press 'b' to capture the background
        bgModel = cv2.createBackgroundSubtractorMOG2()
        isBgCaptured = 1
        print('!!!Background Captured!!!')
    elif k == ord('r'):  # press 'r' to reset the background
        bgModel = None
        triggerSwitch = False
        isBgCaptured = 0
        print('!!!Reset BackGround!!!')
    #canvas.fill((255,255,255))
    pygame.display.flip()
Example #36
                                              OpenCV. You can process both videos and images.'
)
parser.add_argument('--input',
                    type=str,
                    help='Path to a video or a sequence of image.',
                    default='vtest.avi')
parser.add_argument('--algo',
                    type=str,
                    help='Background subtraction method (KNN, MOG2).',
                    default='MOG2')
args = parser.parse_args()

## [create]
#create Background Subtractor objects
if args.algo == 'MOG2':
    backSub = cv.createBackgroundSubtractorMOG2()
else:
    backSub = cv.createBackgroundSubtractorKNN()
## [create]

## [capture]
capture = cv.VideoCapture(cv.samples.findFileOrKeep(args.input))
if not capture.isOpened():
    print('Unable to open: ' + args.input)
    exit(0)
## [capture]

while True:
    ret, frame = capture.read()
    if frame is None:
        break
Example #37
import cv2
from tracker import *
import dronekit


# Create tracker object
tracker = EuclideanDistTracker()

cap = cv2.VideoCapture(0)

# Object detection from Stable camera
object_detector = cv2.createBackgroundSubtractorMOG2(history=100, varThreshold=40)

while True:
    ret, frame = cap.read()
    height, width, _ = frame.shape

    # Extract Region of interest
    roi = frame

    # 1. Object Detection

    mask = object_detector.apply(roi)
    _, mask = cv2.threshold(mask, 254, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    detections = []

    for cnt in contours:
        # Calculate area and remove small elements
        area = cv2.contourArea(cnt)
        if area > 20000:
Example #38
import numpy as np
import cv2
import os

#fps = 15, size = 640*480

count = 0 # frame counter

os.system('sudo modprobe bcm2835-v4l2') # load the V4L2 driver so the Pi camera is recognized
camera = cv2.VideoCapture(0)
camera.set(cv2.CAP_PROP_FPS, 15) # set fps
camera.set(3, 640)  # set resolution: property 3 is width, 4 is height
camera.set(4, 480)

mog = cv2.createBackgroundSubtractorMOG2()   # subtractor used to compute the difference (foreground) image
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))  # kernel for noise removal


def backSubtraction():  # compute and return the difference (foreground) image

    ret, frame = camera.read()  # ret: capture result, frame: captured frame
    fgmask = mog.apply(frame)   # background removal
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)  # remove noise from the foreground mask
    return fgmask
    # cv2.imshow('mask', fgmask)


def motionDetect(): # detect motion over a period of time and return the result

    while True:
Example #39
def generate_subtractor():
    global bg_subtractor
    bg_subtractor = cv2.createBackgroundSubtractorMOG2(
        history=history, varThreshold=varThreshold, detectShadows=False)
Example #40
def vid_colour_calib(video_name, vidTrack_setup_parameters):

    vid_aspect_ratio = float(
        vidTrack_setup_parameters['loaded_video_aspect_ratio'].split(":")[0]
    ) / float(
        vidTrack_setup_parameters['loaded_video_aspect_ratio'].split(":")[1])
    mod_video_resolution = (400, int(400 / vid_aspect_ratio))

    video_tracking_algorithm = vidTrack_setup_parameters[
        'video_tracking_algorithm']

    if video_tracking_algorithm == "MOG":
        fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=False)
    elif video_tracking_algorithm == "Frame Differencing":
        ref_image_name = vidTrack_setup_parameters['reference_image_name']
        ref_image = cv2.imread(ref_image_name)
        hsv_ref_image = cv2.cvtColor(ref_image, cv2.COLOR_BGR2HSV)

    cap = cv2.VideoCapture(video_name)

    while (cap.isOpened()):
        ret, frame = cap.read()

        if not ret:
            print(
                "Video recording frame not returned. Video recording may be missing or damaged"
            )
            break

        image = cv2.resize(frame, mod_video_resolution)

        try:
            hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            colourmask = cv2.inRange(hsv_image, lower_b, upper_b)

            if video_tracking_algorithm == "MOG":
                bsmask = fgbg.apply(colourmask)
            elif video_tracking_algorithm == "Frame Differencing":
                ref_image_mask = cv2.inRange(hsv_ref_image, lower_b, upper_b)
                bsmask = cv2.absdiff(colourmask, ref_image_mask)

            _, cnts, _ = cv2.findContours(bsmask.copy(), cv2.RETR_EXTERNAL,
                                          cv2.CHAIN_APPROX_SIMPLE)

            centre = None

            # only proceed if at least one contour was found
            if len(cnts) > 0:
                # find the largest contour in the mask, then use it to
                # compute the centroid
                c = max(cnts, key=cv2.contourArea)
                M = cv2.moments(c)
                try:
                    centre = (int(M["m10"] / M["m00"]),
                              int(M["m01"] / M["m00"]))

                    cv2.circle(image, centre, 5, (255, 0, 0), -1)
                except:
                    pass

        except:
            pass

        cv2.imshow("Recorded Video", image)
        k = cv2.waitKey(1)
        if k == 27:
            break

        if k == ord('i'):

            def nothing(x):
                pass

            cv2.namedWindow("Colour Mask")

            # create trackbars for color change
            cv2.createTrackbar('Hl', 'Colour Mask', 0, 179, nothing)
            cv2.createTrackbar('Hu', 'Colour Mask', 179, 179, nothing)

            cv2.createTrackbar('Sl', 'Colour Mask', 0, 255, nothing)
            cv2.createTrackbar('Su', 'Colour Mask', 255, 255, nothing)

            cv2.createTrackbar('Vl', 'Colour Mask', 0, 255, nothing)
            cv2.createTrackbar('Vu', 'Colour Mask', 255, 255, nothing)

            hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

            while True:
                hl = cv2.getTrackbarPos('Hl', 'Colour Mask')
                sl = cv2.getTrackbarPos('Sl', 'Colour Mask')
                vl = cv2.getTrackbarPos('Vl', 'Colour Mask')

                hu = cv2.getTrackbarPos('Hu', 'Colour Mask')
                su = cv2.getTrackbarPos('Su', 'Colour Mask')
                vu = cv2.getTrackbarPos('Vu', 'Colour Mask')

                lower_b = np.array([hl, sl, vl])
                upper_b = np.array([hu, su, vu])

                mask = cv2.inRange(hsv, lower_b, upper_b)

                cv2.imshow("Colour Mask", mask)
                k = cv2.waitKey(1)
                if k == ord('i'):
                    break

    cap.release()
    cv2.destroyAllWindows()
    return ([hl, sl, vl], [hu, su, vu])
import cv2
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# switch camera to video streaming
cap = cv2.VideoCapture("output.mp4")  # videos/session0_left.avi
# cap = cv2.VideoCapture(1)

a = []
model_dir = ''
bgsMOG = cv2.createBackgroundSubtractorMOG2(history=2,
                                            varThreshold=50,
                                            detectShadows=0)
if cap.isOpened():
    while True:
        ret, frame = cap.read()
        if ret:
            fgmask = bgsMOG.apply(frame, None, 0.01)
            # To find the contours of the objects
            _, contours, hierarchy = cv2.findContours(fgmask,
                                                      cv2.RETR_EXTERNAL,
                                                      cv2.CHAIN_APPROX_SIMPLE)
            # cv2.drawContours(frame,contours,-1,(0,255,0),cv2.cv.CV_FILLED,32)
            try:
                hierarchy = hierarchy[0]
            except:
                hierarchy = []
            a = []
            for contour, hier in zip(contours, hierarchy):
                (x, y, w, h) = cv2.boundingRect(contour)
Example #42
            break

    return (avgSlope, intercept), zone, zoneList


# video capture, takes file path as arg
# integer value for integrated webcam / usb cameras
#vidcap = cv2.VideoCapture(rootDir + path)

vidcap = cv2.VideoCapture(0)
vidcap.set(cv2.CAP_PROP_FPS, framerate)
vidcap.set(cv2.CAP_PROP_FRAME_WIDTH, sWidth)
vidcap.set(cv2.CAP_PROP_FRAME_HEIGHT, sHeight)

subtractor = cv2.createBackgroundSubtractorMOG2(history=100,
                                                varThreshold=50,
                                                detectShadows=False)


def updateBackground():

    for i in range(0, 100):

        success, f = vidcap.read()

        if not success:
            continue

        subtractor.apply(f)

Example #43
def detect(s):
    cap = cv2.VideoCapture(s)
    frame = cap.read()[1]
    fshape = frame.shape
    fheight = fshape[0]
    fwidth = fshape[1]
    fgbg = cv2.createBackgroundSubtractorMOG2()
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('output.avi', fourcc, 25, (fwidth, fheight))
    half_width = fwidth / 2
    count1 = 0
    count2 = 0
    start_time = time.time()
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        recent_time = time.time()
        # Total count of vehicles for 2 min
        if ((recent_time - start_time) > 120):
            #TODO : send count and timestamp to webpage for plotting
            count1 = 0
            count2 = 0
            start_time = recent_time

        # print(frame.shape)

        mask = fgbg.apply(frame)
        kernel = np.ones((10, 10), np.uint8)
        cv2.line(frame, (0, 250), (frame.shape[1], 250), (0, 255, 0), 2)
        # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
        opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
        cnts = cv2.findContours(opening.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[1]
        green = (0, 255, 0)
        red = (0, 0, 255)
        text = "Number of vehicles"
        text1 = "Left side :" + str(count1)
        text2 = "Right side :" + str(count2)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame, text, (20, 30), font, 0.8, (255, 255, 0), 2)
        cv2.putText(frame, text1, (20, 60), font, 0.6, (255, 0, 255), 2)
        cv2.putText(frame, text2, (380, 60), font, 0.6, (255, 0, 255), 2)
        out.write(frame)
        for cnt in cnts:
            area = cv2.contourArea(cnt)
            if area < 1000:
                continue
            area = cv2.contourArea(cnt)
            (x, y, w, h) = cv2.boundingRect(cnt)
            cv2.rectangle(frame, (x, y), (x + w, y + h), green, 3)
            # cv2.putText(frame,str(area), (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, 255)
            cnt_x = int(x + w / 2)
            cnt_y = int(y + h / 2)
            cv2.circle(frame, (cnt_x, cnt_y), 7, (255, 255, 255), -1)
            #left side
            if (cnt_y < 254 and cnt_y > 246 and cnt_x < half_width):
                cv2.line(frame, (0, 250), (frame.shape[1], 250), red, 2)
                count1 += 1
                # winsound.Beep(2000,500)
            #right side
            if (cnt_y < 254 and cnt_y > 246 and cnt_x > half_width):
                cv2.line(frame, (0, 250), (frame.shape[1], 250), red, 2)
                count2 += 1
                # winsound.Beep(2000,500)

        # print(count)

        cv2.imshow('frame', frame)
        # cv2.imshow('opening',opening)
        k = cv2.waitKey(1) & 0xFF
        if k == ord('q'):
            break

    out.release()
    cap.release()
    cv2.destroyAllWindows()
    return count1, count2
Example #44
def vid_mog_tracking(video_name, vidTrack_setup_parameters):
    run_tme = deque()
    pts_tme = deque()
    pts = deque()

    global mod_pts, mod_pt, run_tme_
    mod_pts = deque()

    ref_col = vidTrack_setup_parameters['ref_col']
    calib_col = vidTrack_setup_parameters['calib_col']

    vid_aspect_ratio = float(
        vidTrack_setup_parameters['loaded_video_aspect_ratio'].split(":")[0]
    ) / float(
        vidTrack_setup_parameters['loaded_video_aspect_ratio'].split(":")[1])
    mod_video_resolution = (400, int(400 / vid_aspect_ratio))

    show_window = vidTrack_setup_parameters['simps']['show_window']
    show_arena_window = vidTrack_setup_parameters['simps']['show_arena_window']
    show_trck_hist = vidTrack_setup_parameters['simps']['show_trck_hist']

    only_sample_arena = vidTrack_setup_parameters['simps']['only_sample_arena']

    x1 = ref_col[0][0]
    x2 = ref_col[1][0]
    y1 = ref_col[0][1]
    y2 = ref_col[1][1]

    cap = cv2.VideoCapture(video_name)

    fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=False)

    lower_b = np.array(calib_col[0])
    upper_b = np.array(calib_col[1])

    start = float(time.time())

    import OneStopTrack
    _isRunning = OneStopTrack._isRunning

    while _isRunning:
        ret, frame = cap.read()

        if not ret:
            #print("Video recording frame not returned. Video recording may be missing or damaged")
            break

        image = cv2.resize(frame, mod_video_resolution)

        if only_sample_arena:
            hsv = cv2.cvtColor(image[y1:y2, x1:x2], cv2.COLOR_BGR2HSV)
        else:
            hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

        # Threshold the HSV image to get only blue colours
        mask = cv2.inRange(hsv, lower_b, upper_b)

        fgmask = fgbg.apply(mask)

        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1))

        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)

        _, cnts, _ = cv2.findContours(fgmask.copy(), cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_SIMPLE)
        centre = None

        # only proceed if at least one contour was found
        if len(cnts) > 0:
            # find the largest contour in the mask, then use it to
            # compute the centroid
            c = max(cnts, key=cv2.contourArea)
            M = cv2.moments(c)
            try:
                if only_sample_arena:
                    centre = (int(M["m10"] / M["m00"]) + x1,
                              int(M["m01"] / M["m00"]) + y1)
                else:
                    centre = (int(M["m10"] / M["m00"]),
                              int(M["m01"] / M["m00"]))
                cv2.circle(image, centre, 5, (255, 0, 0), -1)
            except:
                pass

        millis = float(time.time())
        current_time = round(millis - start, 2)

        run_tme.append(current_time)

        run_tme_ = run_tme[-1]

        if centre is not None:
            pts_tme.append(current_time)

            if show_trck_hist:
                if len(pts) <= 100:
                    pts.append(centre)
                elif len(pts) > 100:
                    pts = deque(itertools.islice(pts, 1, len(pts)))
                    pts.append(centre)

            mod_pt_x = (centre[0] - ref_col[0][0]) * ref_col[4]

            mod_pt_y = (centre[1] - ref_col[0][1]) * ref_col[5]

            mod_pts.append((mod_pt_x, mod_pt_y))

            mod_pt = mod_pts[-1]

        if show_trck_hist:
            for i in range(1, len(pts)):
                if pts[i - 1] is None or pts[i] is None:
                    continue
                cv2.line(image, pts[i - 1], pts[i], (0, 255, 0), 1)

        if show_window:
            if show_arena_window:
                cv2.imshow("Recorded Video Tracking", image[y1:y2, x1:x2])
            else:
                cv2.imshow("Recorded Video Tracking", image)
        cv2.waitKey(1)

        _isRunning = OneStopTrack._isRunning

    cap.release()
    cv2.destroyAllWindows()
    return (pts_tme, mod_pts, run_tme)
Example #45
                    cv2.circle(draw, far, 8, [255, 0, 0], -1)
            if cnt > 0:
                return True, cnt + 1
            else:
                return True, 0
    return False, 0


cam = cv2.VideoCapture(0)

while cam.isOpened():
    ret, image = cam.read()
    image = cv2.bilateralFilter(image, 5, 50, 100)
    image = cv2.flip(image, 1)
    cv2.imshow('input', image)
    bgModel = cv2.createBackgroundSubtractorMOG2(0, 50)  # history=0, varThreshold=50
    fgmask = bgModel.apply(image)
    kernel = np.ones((3, 3), np.uint8)
    fgmask = cv2.erode(fgmask, kernel, iterations=1)
    img = cv2.bitwise_and(image, image, mask=fgmask)

    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    l = np.array([0, 48, 80], dtype="uint8")
    u = np.array([255, 255, 255], dtype="uint8")
    skin = cv2.inRange(hsv, l, u)
    cv2.imshow("hsv image", skin)

    hand = copy.deepcopy(skin)
    contours, hierarchy = cv2.findContours(hand, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    length = len(contours)
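In the snippet above the subtractor is re-created on every loop iteration, so no background model accumulates across frames. Below is a minimal sketch of the more usual pattern, creating it once before the loop; the parameter values 0 and 50 are kept from the example.

import cv2
import numpy as np

cam = cv2.VideoCapture(0)
bgModel = cv2.createBackgroundSubtractorMOG2(0, 50)  # built once, so the model persists
kernel = np.ones((3, 3), np.uint8)
while cam.isOpened():
    ret, image = cam.read()
    if not ret:
        break
    fgmask = bgModel.apply(image)
    fgmask = cv2.erode(fgmask, kernel, iterations=1)
    cv2.imshow('foreground', cv2.bitwise_and(image, image, mask=fgmask))
    if cv2.waitKey(10) == 27:  # ESC to quit
        break
cam.release()
cv2.destroyAllWindows()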
Example #46
 def __init__(self):
     self.fgbg = cv2.createBackgroundSubtractorMOG2()
     self.kernel = np.ones((100,100),np.uint8)
import numpy as np
import cv2

cap = cv2.VideoCapture(0)  #Open video file

fgbg = cv2.createBackgroundSubtractorMOG2(
    detectShadows=True)  #Create the background subtractor
kernelOp = np.ones((3, 3), np.uint8)
kernelCl = np.ones((11, 11), np.uint8)

while (cap.isOpened()):
    ret, frame = cap.read()  #read a frame
    fgmask = fgbg.apply(frame)  #Use the substractor
    try:
        ret, imBin = cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY)
        #Opening (erode->dilate) to remove noise.
        mask = cv2.morphologyEx(imBin, cv2.MORPH_OPEN, kernelOp)
        #Closing (dilate->erode) to join the white regions.
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernelCl)
    except:
        #if there are no more frames to show...
        print('EOF')
        break
    _, contours0, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_NONE)
    for cnt in contours0:
        cv2.drawContours(frame, cnt, -1, (0, 255, 0), 3, 8)
    cv2.imshow('Frame', frame)

    k = cv2.waitKey(30) & 0xff
    if k == 27:
Example #48
import numpy as np
import cv2 as cv
cap = cv.VideoCapture('/Users/macboiiii/Documents/LearnProjects/OpenCV/PicVid/vtest.avi')
fgbg = cv.createBackgroundSubtractorMOG2()
while(1):
    ret, frame = cap.read()
    if frame is None:
        break
    fgmask = fgbg.apply(frame)
    cv.imshow('frame',fgmask)
    k = cv.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv.destroyAllWindows()
    while key != esc_key:
        ret, frame = cap.read()
        detection = obj_detector.apply(frame)
        detection = obj_threshold(detection)
        contours = contour_finder(detection)
        contours = filter(contour_filter, contours)

        for cnt in contours:
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.drawContours(frame, [cnt], -1, (255, 255, 0), 1)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)

        cv2.imshow('frame', frame)
        cv2.imshow('detection', detection)
        key = cv2.waitKey(30) & 0xFF
    pass


#cap = cv2.VideoCapture('C:\\Users\\Jacob\\Documents\\Code\\School\\CSE486\\research\\videos\\FroggerHighway.mp4')
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
obj_detector = cv2.createBackgroundSubtractorMOG2(varThreshold=50)
obj_threshold = lambda img: cv2.threshold(img, 254, 255, cv2.THRESH_BINARY)[1]
contour_finder = lambda img: cv2.findContours(img, cv2.RETR_TREE, cv2.
                                              CHAIN_APPROX_SIMPLE)[0]
contour_filter = lambda cntr: cv2.contourArea(cntr) > 400
run_tracking(cap, obj_detector, obj_threshold, contour_finder, contour_filter)


cap.release()
cv2.destroyAllWindows()
Example #50
def background_subtraction(background_image, foreground_image, device, debug=None):
    """Creates a binary image from a background subtraction of the foreground using cv2.BackgroundSubtractorMOG().
    The binary image returned is a mask that should contain mostly foreground pixels.
    The background image should be the same background as the foreground image except not containing the object
    of interest.

    Images must be of the same size and type.
    If not, the larger image will be downsampled to the smaller image's size.
    If they are of different types, an error will occur.

    Inputs:
    background_image       = img object, RGB or binary/grayscale/single-channel
    foreground_image       = img object, RGB or binary/grayscale/single-channel
    device                 = device number. Used to count steps in the pipeline
    debug                  = None, print, or plot. Print = save to file, Plot = print to screen.

    Returns:
    device                 = device number
    fgmask                 = background subtracted foreground image (mask)

    :param background_image: numpy array
    :param foreground_image: numpy array
    :param device: int
    :param debug: str
    :return device: int
    :return fgmask: numpy array
    """

    device += 1
    # Copying images to make sure the originals are not altered
    bg_img = np.copy(background_image)
    fg_img = np.copy(foreground_image)
    # Checking if images need to be resized or error raised
    if bg_img.shape != fg_img.shape:
        # If both images are not 3 channel or single channel then raise error.
        if len(bg_img.shape) != len(fg_img.shape):
            fatal_error("Images must both be single-channel/grayscale/binary or RGB")
        # Forcibly resizing largest image to smallest image
        print("WARNING: Images are not of same size.\nResizing")
        if bg_img.shape > fg_img.shape:
            width, height = fg_img.shape[1], fg_img.shape[0]
            bg_img = cv2.resize(bg_img, (width, height), interpolation=cv2.INTER_AREA)
        else:
            width, height = bg_img.shape[1], bg_img.shape[0]
            fg_img = cv2.resize(fg_img, (width, height), interpolation=cv2.INTER_AREA)

    # Instantiating the background subtractor, for a single history no default parameters need to be changed.
    if cv2.__version__[0] == '2':
        bgsub = cv2.BackgroundSubtractorMOG()
    else:
        bgsub = cv2.createBackgroundSubtractorMOG2()
    # Applying the background image to the background subtractor first.
    # Anything added after is subtracted from the previous iterations.
    fgmask = bgsub.apply(bg_img)
    # Applying the foreground image to the background subtractor (therefore removing the background)
    fgmask = bgsub.apply(fg_img)

    # Debug options
    if debug == "print":
        print_image(fgmask, "{0}_background_subtraction.png".format(device))
    elif debug == "plot":
        plot_image(fgmask, cmap="gray")
    
    return device, fgmask
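A hypothetical call to background_subtraction, using synthetic arrays in place of real background/foreground photographs:

import numpy as np

bg = np.zeros((100, 100, 3), dtype=np.uint8)  # empty background stand-in
fg = bg.copy()
fg[40:60, 40:60] = 255                        # a bright square plays the "object of interest"
device, fgmask = background_subtraction(bg, fg, device=0, debug=None)
print(fgmask.shape, fgmask.max())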
Example #51
import cv2

# Method 2
vidCap = cv2.VideoCapture('video2.mp4')

# initialize the OpenCV background subtractors for KNN and MOG2
BS_KNN = cv2.createBackgroundSubtractorKNN()
BS_MOG2 = cv2.createBackgroundSubtractorMOG2()

vehile = 0
validVehiles = []
while vidCap.isOpened():
    ret, frame = vidCap.read()  # reads the next frame

    # extract the foreground mask
    fgMask = BS_MOG2.apply(frame)

    # draw the reference traffic lines
    cv2.line(frame, (350, 400), (1500, 400), (0, 0, 255), 2)  # RED Line
    cv2.line(frame, (350, 390), (1500, 390), (0, 255, 0),
             1)  # GREEN Offset ABOVE
    cv2.line(frame, (350, 410), (1500, 410), (0, 255, 0),
             1)  # GREEN Offset BELOW

    # extract the contours
    conts, _ = cv2.findContours(fgMask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

    for c in conts:
        x, y, w, h = cv2.boundingRect(c)

        # ignore the small contours in size
Example #52
small_area = 3000  # filter out small contours that are not people
resize_width = 500
resize_height = 500

# some global variables
first_processing = True
img_list = []
img_copy_list = []
rec_list = []
img_fixed_list = []
img_index = -1
idx2rec = {}

# background subtraction methods
cv2.ocl.setUseOpenCL(False)
fgbg = cv2.createBackgroundSubtractorMOG2(300, 16, False)

# go through the sample images and detect the motion object
for num in range(2968, 2990):  # 2968-2984  2920-2929
    img_name = 'C:\\Users\\lma5\\Pictures\\burst\\IMG_' + str(num) + '.JPG'
    img = cv2.imread(img_name, cv2.IMREAD_COLOR)
    if img is not None:

        # scale first to 500*500
        img = cv2.resize(img, (resize_width, resize_height),
                         interpolation=cv2.INTER_CUBIC)

        img_index += 1
        idx2rec[img_index] = []

        # change to grayscale for better result
def main():

    number_of_watch_point = 4
    myrover = RoverSignFollower()
    myrover.turnLightsOn()
    signal.signal(signal.SIGINT, _signal_handler)
    print("Battery at " + str(myrover.getBatteryPercentage()) + "%"
          )  #Check battery status
    IMAGE_PATHS = [[] for _ in range(number_of_watch_point)]
    current_watch_point = 0
    time.sleep(2)
    sign_counter = 0
    break_point = 0
    while True:
        try:

            while True:
                x, y, w, h, image, cnt = myrover.detect_blue_sign(
                    myrover.getImageName())
                gap = myrover.compare_centroids('tmp.jpg', x, y, w, h)
                if abs(gap) > 10:
                    # Orient towards the center of the sign before moving towards it.
                    myrover.turn(gap * TURN_VALUE)
                if w * h < 29000:  # Keep moving till the sign is big enough to read the command.
                    myrover.moveForward(0.2, 0.5)
                    time.sleep(0.1)
                else:
                    x, y, w, h, action_image, action_cnt = myrover.detect_blue_sign(
                        myrover.getImageName())
                    gap = myrover.compare_centroids('tmp.jpg', x, y, w, h)
                    bs_img = action(x, y, w, h, action_image, action_cnt)
                    if np.count_nonzero(bs_img) > 300000:
                        IMAGE_PATHS[current_watch_point].append(bs_img)
                    break_point = break_point + 1
                    print(break_point)

                if break_point > 30:
                    break_point = 0
                    break

            fgbg = cv2.createBackgroundSubtractorKNN()
            fgbg2 = cv2.createBackgroundSubtractorMOG2()
            # Background subtraction
            for iter in xrange(len(IMAGE_PATHS[current_watch_point])):
                frame = IMAGE_PATHS[current_watch_point][iter]
                frame2 = frame.copy()
                #cv2.imshow('frame', frame)
                fgmask = fgbg.apply(frame)
                fgmask2 = fgbg2.apply(frame2)
                #cv2.imshow('frame with KNN', fgmask)
                #cv2.imshow('frame with MOG', fgmask2)

                alarm = np.count_nonzero(fgmask)
                if alarm > 4000:
                    img = cv2.imread("warning.jpg")
                    cv2.imshow("warning.jpg", img)

                    k = cv2.waitKey(30) & 0xff

                    if k == 27:
                        break

            print('shot')

            current_watch_point = (current_watch_point +
                                   1) % number_of_watch_point
            myrover.turn(165)
            time.sleep(2)

        except:
            traceback.print_exc()
            print("No Purple sign detected")
            #myrover.moveForward(0.2,-0.2) # If no blue sign detected, move forward a little and look again.
            sign_counter += 1
            if sign_counter > 3:
                sign_counter = 0
                myrover.moveForward(0.5, -0.5)
Exemplo n.º 54
0
    def run(self):
        self.calibrate_cam_to_proj()
        display = Display(screen_width=self.screen_size[0])
        sound = Sound(wav_directory="wav")
        cap = cv2.VideoCapture(1)
        frame_num = 0
        note_num = 0  # note index in the song
        is_initial_song_played = True  # flag indicating whether we did the first play of the song
        is_clicked = False  # flag indicating whether the user pressed a key
        history_frame_num = 10
        erode_kernel = np.ones((5, 5), np.uint8)
        history_pts = None
        aruco_detect_params = cv2.aruco.DetectorParameters_create()
        aruco_detect_params.doCornerRefinement = True
        fgbg = cv2.createBackgroundSubtractorMOG2(history=4,
                                                  varThreshold=50.0,
                                                  detectShadows=False)
        while True:
            # Get an image from camera
            img = self._get_image(cap_obj=cap)
            frame_num += 1
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            fgmask = fgbg.apply(gray)  # Add to background subtraction model

            # Find the piano board AruCo markers
            corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
                gray, self.aruco_dict, parameters=aruco_detect_params)
            cv2.aruco.drawDetectedMarkers(img, corners, ids)

            # Display image for debug
            img_debug = img.copy()
            cv2.putText(img_debug, "%d" % frame_num, (8, 25),
                        cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0))
            cv2.imshow('camera', img_debug)

            # If no markers were found continue to next frame
            if ids is None:
                continue

            # If we found the piano markers
            if len(ids) > 0:
                if frame_num % self.update_piano_corners_freq == 0:
                    self.piano.update_coordinates(corners, ids)

            # Project a key
            self.img_to_project.fill(0)
            if self.piano.is_initialize():
                if self.song[note_num] != 'br':
                    # If note is not break
                    piano_key_ind = self.piano.get_key_index_by_name(
                        self.song[note_num])
                    pts = self.piano.get_key_polygon(piano_key_ind)
                    if not (is_initial_song_played):
                        color = self.piano.get_key_color(piano_key_ind)
                    else:
                        color = (0, 0, 255)
                    cv2.fillPoly(self.img_to_project, [pts], color,
                                 cv2.LINE_AA)
                    x = int((pts[0, 0, 0] + pts[1, 0, 0]) / 2.0 - 5)
                    y = int((pts[0, 0, 1] + pts[1, 0, 1]) / 2.0 - 10)

                    cv2.putText(self.img_to_project,
                                "%s" % self.song[note_num], (x, y),
                                cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0))

                    # cv2.putText(self.img_to_project, "%d" % piano_key_ind, tuple(pts[3, 0, :]),
                    #             cv2.FONT_HERSHEY_PLAIN, 1.5, (255, 0, 0))

                else:
                    if is_initial_song_played:
                        is_clicked = True

                # Play sound
                if not (is_initial_song_played):
                    # Play note sound
                    sound.play_note_sound(self.song[note_num])

                    # Advance to the next note
                    time.sleep(0.5)
                    note_num += 1
                    history_frame_num = frame_num
                else:
                    if is_clicked:
                        sound.play_note_sound(self.song[note_num])
                        time.sleep(0.5)
                        note_num += 1
                        is_clicked = False
                        history_frame_num = frame_num
                        # history_pts = pts

                # Check if song has ended
                if note_num >= len(self.song):
                    print("Song Finished!")
                    time.sleep(0.5)
                    if not (is_initial_song_played):
                        is_initial_song_played = True
                        note_num = 0
                    else:
                        break

            # Plot debug image
            # cv2.imshow('img_to_project', self.img_to_project)

            # Detect key press
            if frame_num > history_frame_num + 7:
                key_mask = cv2.cvtColor(self.img_to_project,
                                        cv2.COLOR_BGR2GRAY) > 5
                key_mask = np.uint8(key_mask) * 255
                key_mask = cv2.erode(key_mask, erode_kernel)
                key_mask = key_mask.astype(bool)
                fgmask[~key_mask] = 0
                num_pixels_in_key = np.sum(key_mask)
                num_pixels_changed = np.sum(fgmask > 0)
                frac_pixels_changed = float(num_pixels_changed) / float(
                    num_pixels_in_key)
                if frac_pixels_changed > 0.05:
                    print("Key clicked | Num pixels = %d | fraction = %.3f" %
                          (num_pixels_changed, frac_pixels_changed))
                    is_clicked = True
                cv2.imshow('background_mask', fgmask)
            else:
                if history_pts is not None:
                    cv2.fillPoly(self.img_to_project, [history_pts],
                                 (0, 255, 0), cv2.LINE_AA)

            # Transform image to projector coordinates
            dst = cv2.warpPerspective(self.img_to_project, self.cam_to_proj,
                                      self.screen_size)
            dst = cv2.cvtColor(dst, cv2.COLOR_BGR2RGB)
            display.show_array(dst)

            # Wait for key from user
            key = cv2.waitKey(1)
            if key & 0xFF == ord(self.key_quit):
                break

        # When everything done, release the capture
        cap.release()
        cv2.destroyAllWindows()
        display.close()
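The key-press test in the loop above boils down to the fraction of foreground pixels that fall inside the projected key's mask. A small self-contained restatement of that check (the function name is illustrative, not from the snippet):

import numpy as np

def fraction_changed(fgmask, key_mask):
    # fgmask: uint8 foreground mask from the subtractor; key_mask: boolean mask
    # of the projected key. Returns the share of key pixels marked as changed.
    pixels_in_key = np.sum(key_mask)
    if pixels_in_key == 0:
        return 0.0
    return float(np.sum(fgmask[key_mask] > 0)) / float(pixels_in_key)

# As in the loop above, a click is registered when the fraction exceeds ~0.05.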
Exemplo n.º 55
0
def detect_video(video, ringPoint, showIt):
    camera = cv2.VideoCapture(video)
    history = 20  # number of frames used to train the background model

    # bs = cv2.createBackgroundSubtractorKNN(detectShadows=True)  # background subtractor with shadow detection
    # bs = cv2.bgsegm.createBackgroundSubtractorMOG(history=history)  # results were not good
    bs = cv2.createBackgroundSubtractorMOG2(history=history,
                                            detectShadows=True)  # this one gives the best results
    bs.setHistory(history)
    img_width, img_height = camera.get(3), camera.get(4)
    # print(img_width , img_height)
    ball_x = 0
    ball_y = 0
    detected = True

    frames = 0
    points = []
    ring_x, ring_y, ring_r = ringPoint
    while True:
        res, frame = camera.read()
        if not res:
            # leave the loop if the frame could not be read
            break
        frame = cv2.GaussianBlur(frame, (21, 21), 0)
        # cv2.imshow("first", frame)
        fg_mask = bs.apply(frame)  # get the foreground mask
        # cv2.imshow("frame", fg_mask)
        if frames < history:
            frames += 1
            continue

        # Dilate the original mask to remove noise.
        # Binary thresholding: the foreground mask contains white foreground values
        # and gray shadow values; in the thresholded image every pixel that is not
        # near-white (244~255) is set to 0 rather than 255.
        th = cv2.threshold(fg_mask.copy(), 244, 255, cv2.THRESH_BINARY)[1]
        th = cv2.erode(th,
                       cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)),
                       iterations=2)
        dilated = cv2.dilate(th,
                             cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                       (8, 6)),
                             iterations=2)
        # get all detection boxes
        image, contours, hier = cv2.findContours(dilated, cv2.RETR_EXTERNAL,
                                                 cv2.CHAIN_APPROX_SIMPLE)
        # print(len(contours))
        if (len(contours) > 4):
            #print("shock , no ball")
            continue

        for c in contours:
            # print(c)
            # get the bounding-box coordinates
            x, y, w, h = cv2.boundingRect(c)
            #print([x,y,w,h])
            # compute the contour area
            area = cv2.contourArea(c)
            #print(area)
            # At 640 px and 7 m, use 200 < area < 2500 (the competition uses 6 m, not 7 m).
            # At 1280 px, 2 m high and 6 m away, use area 600~8000 (these parameters were abandoned).
            # At 640 px, 2 m high and 6 m away, use area 600~8000.
            # At 320 px, 2 m high and 6 m away, use area 120~8000.
            if 120 < area < 8000:
                # print([ball_x,ball_y])
                # For 640 or 1280 px, use y < (img_height*0.41) and (img_width*0.15) < x < (img_width*0.78)
                # or, also for 640 or 1280 px (recommended), y < (img_height*0.41) and (ring_x-ring_r-100) < x < (ring_x+ring_r+100)
                # For 320 px, use y < (img_height*0.41) and (ring_x-ring_r-50) < x < (ring_x+ring_r+50)
                if ball_x == 0 and ball_y == 0 and y < (
                        img_height * 0.41) and (ring_x - ring_r - 50) < x < (
                            ring_x + ring_r + 50):
                    ball_x = x
                    ball_y = y
                    # print("First ball get",[ball_x ,ball_y])
                    points.append([x, y, w, h])
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0),
                                  2)
                else:
                    # For 640 px, use ball_x != 0 and (ball_x - img_width*0.15) < x < (ball_x + img_width*0.15) and y < (ball_y + img_height*0.5)
                    # For 320 px, use ball_x != 0 and (ball_x - img_width*0.15) < x < (ball_x + img_width*0.15) and y < (ball_y + img_height*0.35)
                    if ball_x != 0 and (ball_x - (img_width * 0.15)) < x < (
                            ball_x +
                        (img_width * 0.15)) and (y <
                                                 (ball_y + img_height * 0.35)):
                        if (y) > (ring_y + ring_r) or (y + 10) > (
                                ring_y + ring_r) or (y + h + 5) > img_height:
                            # The ball is done: it has passed below the ring.
                            # For 640 px, use (y > ring_y + ring_r) or (y + 10 > ring_y + ring_r)
                            # For 320 px, use (y > ring_y + ring_r) or (y + 10 > ring_y + ring_r) or (y + h + 5 > img_height)
                            cv2.rectangle(frame, (x, y), (x + w, y + h),
                                          (0, 255, 0), 2)
                            # print("ball is done!")
                            camera.release()
                            break
                        else:
                            ball_x = x
                            ball_y = y
                            cv2.rectangle(frame, (x, y), (x + w, y + h),
                                          (0, 255, 0), 2)
                            # print("Next ball",[ball_x ,ball_y])
                            points.append([x, y, w, h])
                            break

                    elif ball_x == 0 and ball_y == 0 or y < ball_y:
                        # print("no ball")
                        break
                    else:
                        # print("Scanner Moved!")
                        ball_y += (img_height * 0.1)
                        break
        if (showIt):
            cv2.imshow("detection", frame)
            cv2.imshow("back", dilated)
        k = cv2.waitKey(110) & 0xff
        if k == 27:
            break
    if ball_x == 0 and ball_y == 0:
        # print("Didn't throw the ball")
        detected = False
    camera.release()
    cv2.destroyAllWindows()
    return points, detected
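detect_video returns the list of tracked bounding boxes and a flag telling whether a ball was seen at all. A hedged usage sketch (the file name and ring circle are placeholders, not values from the snippet):

ring = (160, 120, 40)                      # assumed (ring_x, ring_y, ring_r)
points, detected = detect_video("throw.mp4", ring, showIt=True)
if detected:
    print("ball boxes:", points)
else:
    print("no ball was thrown")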
Exemplo n.º 56
0
    s = smtplib.SMTP('smtp.gmail.com', 587)
    s.ehlo()
    s.starttls()
    s.ehlo()
    s.login(' ', '')  #enter e-mail of sender and password
    s.sendmail(msg['From'], msg['To'], msg.as_string())
    s.quit()
    return final_id


final_id = 0
initial_id = 1
threshold = 10

cam = cv2.VideoCapture(0)
move = cv2.createBackgroundSubtractorMOG2()

if not os.path.exists('proof'):
    os.makedirs('proof')

for the_file in os.listdir('proof'):
    file_path = os.path.join('proof', the_file)
    if os.path.isfile(file_path):
        os.unlink(file_path)

while True:
    _, frame = cam.read()
    white_position = move.apply(frame)
    movement = np.array(white_position)
    white_pixel = np.count_nonzero(movement)
    percentage = (white_pixel * 100) / np.size(movement)
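    # Hedged continuation sketch: the original snippet stops here, so the lines
    # below are an assumption about the usual follow-up, not the author's code.
    # When the share of changed pixels exceeds the threshold, save a proof frame;
    # the original alarm/e-mail logic is unknown and not reproduced.
    if percentage > threshold:
        cv2.imwrite(os.path.join('proof', 'proof_{}.jpg'.format(initial_id)), frame)
        initial_id += 1
    cv2.imshow('motion mask', white_position)
    if cv2.waitKey(30) & 0xFF == 27:
        break

cam.release()
cv2.destroyAllWindows()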
Exemplo n.º 57
0
def video_feed_counter(conf, mode, input, output, url, camera):
    # load the configuration file
    conf = Conf(conf)
    count = 0
    # initialize the MOG foreground background subtractor object
    # mog = cv2.bgsegm.createBackgroundSubtractorMOG()
    mog = cv2.createBackgroundSubtractorMOG2()
    # initialize and define the dilation kernel
    dKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))

    # initialize the video writer process
    writerProcess = None

    # initialize the frame dimensions (we'll set them as soon as we read
    # the first frame from the video)
    W = None
    H = None

    # instantiate our centroid tracker and initialize a dictionary to
    # map each unique object ID to a trackable object
    ct = CentroidTracker(conf["max_disappeared"], conf["max_distance"])
    trackableObjects = {}

    # if a video path was not supplied, grab a reference to the webcam
    # if not args.get("input", False):
    # if input:
    # 	print("[INFO] starting video stream...")
    # 	# vs = VideoStream(src=0).start()
    # 	vs = VideoStream(usePiCamera=True).start()
    # 	time.sleep(2.0)

    # otherwise, grab a reference to the video file
    # else:
    print("[INFO] opening video file...")
    vs = cv2.VideoCapture(url, cv2.CAP_FFMPEG)
    # vs = cv2.VideoCapture(args["input"])

    # check if the user wants to use the difference flag feature
    if conf["diff_flag"]:
        # initialize the start counting flag and mouse click callback
        start = False
        cv2.namedWindow("set_points")
        cv2.setMouseCallback("set_points", set_points, [mode])

    # otherwise, the user does not want to use it
    else:
        # set the start flag as true indicating to start traffic counting
        start = True

    # initialize the direction info variable (used to store information
    # such as up/down or left/right vehicle count) and the difference
    # point (used to differentiate between left and right lanes)
    directionInfo = None
    diffPt = None
    fps = FPS().start()
    # print('fbs')
    # loop over frames from the video stream
    while (vs.isOpened()):
        # grab the next frame and handle if we are reading from either
        # VideoCapture or VideoStream
        # frame = vs.read()
        ret, frame = vs.read()  # read the next frame

        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if input is not None and frame is None:
            break
        #print("frame in while")

        # check if the start flag is set, if so, we will start traffic
        # counting

        if start:
            # if the frame dimensions are empty, grab the frame
            # dimensions, instantiate the direction counter, and set the
            # centroid tracker direction

            if W is None or H is None:
                # start the frames per second throughput estimator
                #fps = FPS().start()
                (H, W) = frame.shape[:2]
                dc = DirectionCounter(mode, W - conf["x_offset"],
                                      H - conf["y_offset"])
                ct.direction = mode

                # check if the difference point is set, if it is, then
                # set it in the centroid tracker object
                if diffPt is not None:
                    ct.diffPt = diffPt

            # begin writing the video to disk if required
            if output is not None and writerProcess is None:
                # set the value of the write flag (used to communicate when
                # to stop the process)
                writeVideo = Value('i', 1)

                # initialize a shared queue to exchange frames,
                # initialize a process, and start the process
                frameQueue = Queue()
                writerProcess = Process(target=write_video,
                                        args=(output, writeVideo, frameQueue,
                                              W, H))
                writerProcess.start()

            # initialize a list to store the bounding box rectangles
            # returned by background subtraction model
            rects = []

            # convert the frame to grayscale image and then blur it
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (5, 5), 0)

            # apply the MOG background subtraction model which returns
            # a mask
            mask = mog.apply(gray)

            # apply dilation
            dilation = cv2.dilate(mask, dKernel, iterations=2)

            # find contours in the mask
            cnts = cv2.findContours(dilation.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)

            # loop over each contour
            for c in cnts:
                # if the contour area is less than the minimum area
                # required then ignore the object
                if cv2.contourArea(c) < conf["min_area"]:
                    continue

                # get the (x, y)-coordinates of the contour, along with
                # height and width
                (x, y, w, h) = cv2.boundingRect(c)

                # check if the direction is vertical and the vehicle is
                # farther away from the line; if so, there is no need to
                # detect it
                if mode == "vertical" and y < conf["limit"]:
                    continue

                # otherwise, check if direction is horizontal and the
                # vehicle is further away from the line, if so then,
                # no need to detect it
                elif mode == "horizontal" and x > conf["limit"]:
                    continue

                # add the bounding box coordinates to the rectangles list
                rects.append((x, y, x + w, y + h))

            # check if the direction is vertical
            if mode == "vertical":
                # draw a horizontal line in the frame -- once an object
                # crosses this line we will determine whether they were
                # moving 'up' or 'down'
                cv2.line(frame, (0, H - conf["y_offset"]),
                         (W, H - conf["y_offset"]), (0, 255, 255), 2)

                # check if a difference point has been set, if so, draw
                # a line dividing the two lanes
                if diffPt is not None:
                    cv2.line(frame, (diffPt, 0), (diffPt, H), (255, 0, 0), 2)

            # otherwise, the direction is horizontal
            else:
                # draw a vertical line in the frame -- once an object
                # crosses this line we will determine whether they were
                # moving 'left' or 'right'
                # print('ddds')
                cv2.line(frame, (W - conf["x_offset"], 0),
                         (W - conf["x_offset"], H), (0, 255, 255), 2)

                # check if a difference point has been set, if so, draw a
                # line dividing the two lanes
                if diffPt is not None:
                    cv2.line(frame, (0, diffPt), (W, diffPt), (255, 0, 0), 2)

            # use the centroid tracker to associate the (1) old object
            # centroids with (2) the newly computed object centroids
            objects = ct.update(rects)

            # loop over the tracked objects
            for (objectID, centroid) in objects.items():
                # check to see if a trackable object exists for the
                # current object ID and initialize the color
                to = trackableObjects.get(objectID, None)
                color = (0, 0, 255)

                # create a new trackable object if needed
                if to is None:
                    to = TrackableObject(objectID, centroid)

                # otherwise, there is a trackable object so we can
                # utilize it to determine direction
                else:
                    # find the direction and update the list of centroids
                    dc.find_direction(to, centroid)
                    to.centroids.append(centroid)

                    # check to see if the object has been counted or not
                    if not to.counted:

                        # find the direction of motion of the vehicles
                        directionInfo = dc.count_object(to, centroid, camera)

                    # otherwise, the object has been counted and set the
                    # color to green indicate it has been counted
                    else:
                        color = (0, 255, 0)

                # store the trackable object in our dictionary
                trackableObjects[objectID] = to

                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4, color, -1)

            # extract the traffic counts and write/draw them
            if directionInfo is not None:
                for (i, (k, v)) in enumerate(directionInfo):
                    text = "{}: {}".format(k, v)
                    cv2.putText(frame, text, (10, ((i * 20) + 20)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            # put frame into the shared queue for video writing
            if writerProcess is not None:
                frameQueue.put(frame)

            # show the output frame
            # cv2.imshow("Frame", frame)
            frames = cv2.imencode('.jpg', frame)[1].tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frames + b'\r\n')
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break

            # update the FPS counter
            fps.update()

        # otherwise, the user has to select a difference point
        else:
            # show the output frame
            # cv2.imshow("set_points", frame)
            frames = cv2.imencode('.jpg', frame)[1].tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frames + b'\r\n')
            key = cv2.waitKey(1) & 0xFF

            # if the `s` key was pressed, start traffic counting
            if key == ord("s"):
                # begin counting and eliminate the informational window
                start = True
                cv2.destroyWindow("set_points")

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # terminate the video writer process
    if writerProcess is not None:
        writeVideo.value = 0
        writerProcess.join()

    # if we are not using a video file, stop the camera video stream
    # if not args.get("input", False):
    # 	vs.stop()

    # otherwise, release the video file pointer
    vs.release()

    # close any open windows
    cv2.destroyAllWindows()
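Because the function yields multipart JPEG chunks, it is usually wrapped in a streaming HTTP response. A hedged Flask sketch (the route name, config path, stream URL, and camera id are assumptions, not taken from the original):

from flask import Flask, Response

app = Flask(__name__)

@app.route("/video_feed")
def video_feed():
    # Wrap the multipart-JPEG generator above in a streaming HTTP response.
    gen = video_feed_counter(conf="config/config.json", mode="vertical",
                             input=None, output=None,
                             url="rtsp://camera.local/stream", camera="cam-1")
    return Response(gen,
                    mimetype="multipart/x-mixed-replace; boundary=frame")

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)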
import sys
import cv2 as cv


if len(sys.argv) < 2:
    print(f"Program usage: {sys.argv[0]} video_filename")
    exit(1)

cap = cv.VideoCapture(sys.argv[1])

# Gaussian Mixture-based Background/Foreground Segmentation Algorithm.
# Default values
fgbg = cv.createBackgroundSubtractorMOG2(
    history=500, varThreshold=16, detectShadows=True
)

while True:

    ret, frame = cap.read()

    if not ret:
        break

    fgmask = fgbg.apply(frame)

    cv.namedWindow("original", cv.WINDOW_NORMAL)
    cv.imshow("original", frame)

    cv.namedWindow("mask", cv.WINDOW_NORMAL)
    cv.imshow("mask", fgmask)
Exemplo n.º 59
0
import numpy as np
import cv2
from scipy.spatial import distance
#lower = np.array([0, 133, 100], dtype = "uint8")
#upper = np.array([255, 173, 127], dtype = "uint8")

#lower = np.array([0, 48, 80], dtype = "uint8")
#upper = np.array([20, 255, 255], dtype = "uint8")

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2(history=20,
                                          varThreshold=16,
                                          detectShadows=False)
#fgbg = cv2.createBackgroundSubtractorKNN(history=500, dist2Threshold=400.0, detectShadows=False)

while True:
    ret, frame = cap.read()

    fgmask = fgbg.apply(frame)
    #contours
    im2, contours, hierarchy = cv2.findContours(fgmask, cv2.RETR_TREE,
                                                cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(fgmask, contours, -1, (0, 255, 0), 3)
    if len(contours) > 0:
        # index only when contours exist; contours[0] would raise on an empty list
        cnt = contours[0]
        # find largest contour in mask, use to compute minEnCircle
        c = max(contours, key=cv2.contourArea)
        (x, y), radius = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        x_ = int(x - radius)
        y_ = int(y - radius)
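        # Hedged continuation sketch: the original snippet is cut off here, so
        # the lines below are an assumption about the usual follow-up (the
        # distance-based logic hinted at by the scipy import is unknown).
        # Draw the enclosing circle and the contour centroid on the frame.
        if M["m00"] > 0:
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 0), 2)
            cv2.circle(frame, center, 3, (0, 0, 255), -1)

    cv2.imshow("frame", frame)
    cv2.imshow("mask", fgmask)
    if cv2.waitKey(30) & 0xFF == 27:
        break

cap.release()
cv2.destroyAllWindows()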
Exemplo n.º 60
0
import cv2
import numpy as np
 
cap = cv2.VideoCapture("Intrusion_1.mp4")
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
kernel_dil = np.ones((20,20), np.uint8) 
subtractor = cv2.createBackgroundSubtractorMOG2(history=20, varThreshold=25, detectShadows=True)
 
while True:
    ret, frame = cap.read()

    # stop when the video ends or a frame cannot be read
    if not ret:
        break

    mask = subtractor.apply(frame)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    dilation = cv2.dilate(mask, kernel_dil, iterations=1)
    (contours, hierarchy) = cv2.findContours(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if area > 3000:
            # cv2.putText requires a text origin; (20, 40) is an assumed position
            cv2.putText(frame, "Intrusion Detected", (20, 40),
                        cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 0), 2)

    cv2.imshow("Frame", frame)
    cv2.imshow("Mask", mask)
    key = cv2.waitKey(30)
    if key == 27:
        break

cap.release()
cv2.destroyAllWindows()