Example no. 1
def __init__(self, ratio=0.002, learningRate=-1):
    self.bgSubtractor = cv2.createBackgroundSubtractorKNN()
    #self.bgSubtractor = cv2.BackgroundSubtractorMOG2()
    self.ratio = ratio
    self.learningRate = learningRate
    self.scale = 1
    self.fgmask = np.array([])
    #self.bgm = np.array([])
    self.contourList = []
    self.maxContour = []
    self.bbox = []
    self.contourRects = []
    self.ret = False
    def __init__(self, video_src):
        self.cam = cv2.VideoCapture(video_src)
        ret, self.frame = self.cam.read()
        cv2.namedWindow('gesture_hci')

        self.cmd_switch = False
        self.mask_lower_yrb = np.array([54, 131, 110])      #[54, 131, 110]
        self.mask_upper_yrb = np.array([143, 157, 155])     #[163, 157, 135]
        
        self.fgbg = cv2.createBackgroundSubtractorKNN()
        #self.fgbg = cv2.BackgroundSubtractorMOG2(history=120, varThreshold=50, bShadowDetection=True)

        # create trackbar for skin calibration
        self.calib_switch = False
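        # hedged sketch (assumption, not the original): the trackbars that the
        # comment above refers to, one per channel bound of the YRB skin mask
        def nothing(x):
            pass
        cv2.createTrackbar('y_min', 'gesture_hci', int(self.mask_lower_yrb[0]), 255, nothing)
        cv2.createTrackbar('y_max', 'gesture_hci', int(self.mask_upper_yrb[0]), 255, nothing)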
def main():
    # ####upper and lower bounds for colors
    # yellow bounds from:
    # https://stackoverflow.com/questions/9179189/detect-yellow-color-in-opencv
    camera = cv2.VideoCapture(0)

    lower_blue = np.array([110, 50, 50])
    upper_blue = np.array([130, 255, 255])
    lower_yellow = np.array([20, 100, 100])
    upper_yellow = np.array([30, 255, 255])
    grabBackground = 0
    fgbg = None
    reduction = None
    background = None
    grabbed = False
    while True:
        (ret, img_frame) = camera.read()
        # set up img_frame for masking
        img_frame = imutils.resize(img_frame, width=900)
        img_frame = cv2.flip(img_frame, 1)
        blah = img_frame.copy()
        if (grabBackground <30):
            grabBackground += 1
        elif not grabbed:
            fgbg = cv2.createBackgroundSubtractorKNN()
            background = img_frame.copy()
            grabbed = True

        # apply the KNN subtractor and a manual background subtraction once the background is grabbed
        if fgbg is not None:
            img_frame = fgbg.apply(img_frame)
        if background is not None:
            res = cv2.subtract(background, blah)

        else: res = img_frame.copy()
        res = cv2.erode(res, None, iterations=1)
        res = cv2.dilate(res, None, iterations=1) 
        img_frame = cv2.erode(img_frame, None, iterations=2)
        img_frame = cv2.dilate(img_frame, None, iterations=2)     
        cv2.imshow("other", res)
        cv2.imshow("Frame", img_frame)

        key = cv2.waitKey(1) & 0xFF

        # if the 'q' key is pressed, stop the loop
        if key == ord("q"):
            break
    # cleanup the camera and close any open windows
    camera.release()
    cv2.destroyAllWindows()
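The lower/upper blue and yellow bounds above are defined but never used inside the loop; a sketch of the color mask they suggest (the HSV conversion is an assumption, since bounds in these ranges are usually HSV values):

# hedged sketch: would sit inside the loop, masking everything but blue
hsv = cv2.cvtColor(blah, cv2.COLOR_BGR2HSV)
blue_mask = cv2.inRange(hsv, lower_blue, upper_blue)
blue_only = cv2.bitwise_and(blah, blah, mask=blue_mask)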
Example no. 4
def maincv2(path):
    k=9
    vcap = cv2.VideoCapture('rtsp://*****:*****@172.16.56.108:554')       # camera: 'rtsp://*****:*****@172.17.13.250:554/h264/'+ch+'/main/av_stream'
    fgbg = cv2.createBackgroundSubtractorKNN()               # non-parametric (KNN) background modeling
    while True:
        ret, frame = vcap.read()
        if ret:
            frame = cv2.GaussianBlur(frame, (5, 5), 0)  # blur before modeling to suppress noise
            #frame = imutils.resize(frame, width=int(800))
            fgmask = fgbg.apply(frame)                 # obtain the foreground (grayscale image)
            k += 1
            print('k', k)
            filename=path+'result/'+'image'+str(k)+'.png'
            #nametv = path+'imagetv'+str(k)+'.png'
            #namedelta = path+'imagedelta'+str(k)+'.png'
            #cv2.imwrite(nametv, fgmask)
            if k%10==0:
                #print filename
                num = 0
                img=imutils.resize(fgmask,width=500)
                #print 'img',img
                filenametv=path+'lasttv.png'                  # filename of the previous frame's mask used for comparison
                #print filenametv
                if os.path.exists(filenametv):
                    img2=cv2.imread(filenametv,0)
                    #print 'img2',img2
                else:
                    cv2.imwrite(filenametv,img)
                    img2 = img
                img3=img
                img4=img2
                #print 'img3',img3,img3.shape[:2]
                #print 'img4',img4,img4.shape[:2]
                frameDelta = cv2.absdiff(img3,img4)
                #frameDelta=img3-img4
                #cv2.imwrite(namedelta, frameDelta)
                (h,w)=frameDelta.shape[:2]
                print(h, w)
                frameDelta = cv2.threshold(frameDelta,128,255,cv2.THRESH_BINARY)
                #print frameDelta[1]

                for i in range(h):
                    for j in range(w):
                        if frameDelta[1][i][j]>100:
                            num+=1
                print(num)
                if float(num)/(w*h)>0.01:
                    cv2.imwrite(filename,frame)
                    cv2.imwrite(filenametv,img)
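The nested pixel loop above counts changed pixels one by one in Python; an equivalent vectorized count with numpy (same 100 threshold) would be:

# vectorized equivalent of the double loop over frameDelta[1]
num = int(np.count_nonzero(frameDelta[1] > 100))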
Example no. 5
def maincv2(path):
    k=1
    fgbg = cv2.createBackgroundSubtractorKNN()
    while True:
        filename = path + 'njue-108-' + str(k) + '.jpeg'
        frame = cv2.imread(filename)
        if frame is None:  # stop once the next image in the sequence is missing
            break
        print('k', k)
        time.sleep(1)
        k = k + 1
        #frame = imutils.resize(frame, width=int(800))
        fgmask = fgbg.apply(frame)
        nametv = path+'imagetv'+str(k)+'.png'
        #namedelta = path+'imagedelta'+str(i)+'.png'
        #cv2.imwrite(nametv, fgmask)
        num = 0
        img=fgmask
        #img=imutils.resize(fgmask,width=500)
        #print 'img',img
        filenametv=path+'lasttv.png'                  # filename of the previous frame's mask used for comparison
        #print filenametv
        if os.path.exists(filenametv):
            img2=cv2.imread(filenametv,0)
            #print 'img2',img2
        else:
            cv2.imwrite(filenametv,img)
            img2 = img
        img3=img
        img4=img2
        #print 'img3',img3,img3.shape[:2]
        #print 'img4',img4,img4.shape[:2]
        frameDelta=cv2.absdiff(img3,img4)
        (h,w)=frameDelta.shape[:2]
        print(h, w)
        frameDelta = cv2.threshold(frameDelta,128,255,cv2.THRESH_BINARY)
        #print frameDelta[1]

        for i in range(h):
            for j in range(w):
                if frameDelta[1][i][j]>100:
                    num+=1
        print(num)
        print(w * h)
        if float(num)/(w*h)<0.02:
            os.remove(filename)
        else:
            cv2.imwrite(filenametv,img)
    def procces(self):
        cap = cv2.VideoCapture(0)
        # KNN background subtraction worked best in my tests
        #fgbg = cv2.createBackgroundSubtractorMOG2()
        fgbg = cv2.createBackgroundSubtractorKNN()

        firstFrame = None
        count = 0

        aaa = np.zeros((300,512,3), np.uint8)
        cv2.namedWindow('slider')
        cv2.createTrackbar("Gauss", "slider", 1, 25, self.callback_gauss)
        cv2.createTrackbar("thresh1", "slider", 1, 255, self.callback_thresh1)
        cv2.createTrackbar("thresh2", "slider", 1, 255, self.callback_thresh2)

        while(True):
                #Since the camera shutter takes a moment to adapt, I wait for a few
                #frames to be acquired before starting to process the images
                if count == 15:
                        ret, frame = cap.read()
                        #Not sure why my webcam captured the image inverted
                        #If yours ends up upside down, remove this line
                        frame = cv2.flip(frame,0)
                        
                        #Use the first frame as the reference image for the heat map
                        if firstFrame is None:
                                firstFrame = frame
                        
                        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                        
                        #There are several low-pass filter options; medianBlur preserves the
                        #image edges, unlike GaussianBlur (note: the line below actually applies GaussianBlur)
                        Gauss = cv2.GaussianBlur(gray, (self.gauss, self.gauss), 0)

                        thresh = cv2.threshold(Gauss, self.thresh1, self.thresh2, cv2.THRESH_BINARY)[1]
                        
                        cv2.imshow("slider", Gauss)
                        cv2.imshow("thresh", thresh)
                        k = cv2.waitKey(30) & 0xff
                        if k == 27:
                            break
                else:
                        count += 1

        cap.release()
        cv2.destroyAllWindows()
    def __init__(self, subMethod, display_image, acc, thresh, moghistory, mogvariance):

        ##Subtractor Method
        self.subMethod = subMethod

        ####Create Background Subtractor
        if self.subMethod in ["Acc", "Both"]:
            self.running_average_image = np.float32(display_image)
            self.accAvg = acc
            self.threshT = thresh

        if self.subMethod in ["MOG", "Both"]:
            # MOG method creator
            self.fgbg = cv2.createBackgroundSubtractorMOG2(history=moghistory, detectShadows=False)
        if self.subMethod == "KNN":
            # KNN method creator
            self.fgbg = cv2.createBackgroundSubtractorKNN()
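The "Acc" branch above only initializes the running average; a sketch of how such an accumulator is typically applied per frame (this method is an assumption, not part of the original class):

    def acc_mask(self, frame):
        # fold the new frame into the running average with weight accAvg
        cv2.accumulateWeighted(frame, self.running_average_image, self.accAvg)
        background = cv2.convertScaleAbs(self.running_average_image)
        diff = cv2.absdiff(frame, background)
        # keep only differences above the configured threshold
        return cv2.threshold(diff, self.threshT, 255, cv2.THRESH_BINARY)[1]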
def maincv2(path):
    #print path
    i = 0                                  # i counts frames
    vcap = cv2.VideoCapture(path)       # read the image sequence
    print(vcap.read())
    fgbg = cv2.createBackgroundSubtractorKNN()               # KNN background modeling (the original comment said "Gaussian mixture")
    while True:
        ret, frame = vcap.read()
        if not ret:  # guard: frame is None once the sequence ends
            break
        print(frame)
        fgmask = fgbg.apply(frame)                 # obtain the foreground (grayscale image)
        '''
        cv2.imshow('fgmask', fgmask)
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break
        '''
        print(ret)
        if ret:
            name = 'nn'
            i+=1
            print(i)

            if i%1==0:
                name = str(path[:-11])+'image'+str(i)+'.png'
                print(name)
                picdir,picname = os.path.split(name)
                #print picdir
                ISOTIMEFORMAT='%Y-%m-%d %X'
                newname=picdir+'/'+str(time.strftime( ISOTIMEFORMAT, time.localtime() ))+'.png'
                print(newname)
                os.rename(name,newname)
                print(name)



                nametv = str(path[:-11])+'imagetv'+str(i)+'.png'
                namedelta = str(path[:-11])+'imagedelta'+str(i)+'.png'           # build the nametv and namedelta paths
                #print nametv
                #print namedelta
                #cv2.imwrite(name, frame)                 # write the current frame to name
                cv2.imwrite(nametv, fgmask)              # write the current frame's binary mask to nametv
                test(path, newname, nametv, namedelta)                       # frame differencing
Example no. 9
    def run(self):

        print('b')
        #cap = cv2.VideoCapture('D:\\Work_Documents\\sandbox\\OpenCV\\with_EEN\\viaVLC\\EN-CDUM-002a+2016-08-29+14-38-40.mp4') #Open video file
        cap = cv2.VideoCapture(
            'http://*****:*****@192.168.11.3:10226/snapshot.cgi?.mjpeg')
        #cap = cv2.VideoCapture('http://127.0.0.1:8080')

        fps = 15  #int(cap.get(5)+4)
        print('Current FPS is ' + str(fps))
        #cv2.ocl.setUseOpenCL(False)
        fgbg = cv2.createBackgroundSubtractorKNN(
            detectShadows=True)  # use the k-nearest-neighbours (kNN) method for background subtraction

        # initialize var and windows
        itr = 0
        font = cv2.FONT_HERSHEY_SIMPLEX
        old_center = np.empty((0, 2), float)
        detect_state = np.zeros(60, dtype=int)  # np.int was removed from numpy; plain int is equivalent
        detect_state_point = 0
        count = 0
        self.now_count = 0
        '''
        cv2.namedWindow("Frame", cv2.WINDOW_KEEPRATIO | cv2.WINDOW_NORMAL)
        cv2.namedWindow("Background Substraction", cv2.WINDOW_KEEPRATIO | cv2.WINDOW_NORMAL)
        cv2.namedWindow("Contours", cv2.WINDOW_KEEPRATIO | cv2.WINDOW_NORMAL)
        '''

        # define functions
        def padding_position(x, y, w, h, p):  # expand the rectangle by p on each side
            return x - p, y - p, w + p * 2, h + p * 2

        # find a nearest neighbour point
        def serchNN(p0, ps):
            L = np.array([])
            for i in range(ps.shape[0]):
                L = np.append(L, np.linalg.norm(ps[i] - p0))
            return ps[np.argmin(L)]

        # Determine whether two segments intersect.
        # If two segments intersect, then seen from either segment the two endpoints of
        # the other lie on opposite sides of it. So the cross products of a segment with
        # the vectors from its start point to the other segment's endpoints have opposite
        # signs, and their product is negative. If that holds for both segments, they intersect.
        def intersect_direction(ap1, ap2, bp1, bp2):
            cross1 = np.cross(ap2 - ap1, bp1 - ap1)
            cross2 = np.cross(ap2 - ap1, bp2 - ap1)
            calc1 = cross1 * cross2
            if (calc1 < 0):
                calc2 = np.cross(bp2 - bp1, ap1 - bp1) * np.cross(
                    bp2 - bp1, ap2 - bp1)
                if (calc2 < 0):
                    return 1 if cross1 < 0 else -1  # on intersection: 1 for a left-to-right crossing, -1 for right-to-left
            return 0

        # Keep the motion-detection state of the most recent frames and decide whether
        # a moving object is currently present. Essentially a debouncing function.
        def detected(detect_cnt):
            nonlocal detect_state  # lives in run()'s scope; the original 'global' would raise a NameError here
            nonlocal detect_state_point
            detect_state[detect_state_point] = detect_cnt

            if detect_state_point != detect_state.shape[0] - 1:
                detect_state_point = detect_state_point + 1
            else:
                detect_state_point = 0

            if detect_state.shape[0] // 2 <= detect_state.sum():
                return True
            else:
                return False

        # apply convexHull to the contour
        def convHull(cnt):
            epsilon = 0.1 * cv2.arcLength(cnt, True)  # perimeter of the contour
            approx = cv2.approxPolyDP(
                cnt, epsilon,
                True)  # approximate the contour shape; a smaller epsilon gives a closer fit (result unused)
            hull = cv2.convexHull(
                cnt, returnPoints=True)  # convex hull: the shape a band stretched around the figure would take
            return hull

        # detect a centroid from a contour
        def centroidPL(cnt):
            M = cv2.moments(cnt)  # image moments of the contour
            cx = int(M['m10'] / M['m00'])  # centroid x from the moments
            cy = int(M['m01'] / M['m00'])  # centroid y from the moments
            return cx, cy

        # display 1st frame and set counting line
        ret, img = cap.read()
        img = cv2.resize(img, (img.shape[1] // 2, img.shape[0] // 2))
        imgr = img.copy()  # copy so we do not mutate the same object through a shared reference
        sx, sy = -1, -1
        ex, ey = -1, -1

        def draw_line(event, x, y, flags, param):  # draw a straight line from the drag start point to the drag end point
            nonlocal sx, sy, ex, ey  # defined in run()'s scope above; the original 'global' would not rebind them

            if event == cv2.EVENT_LBUTTONDOWN:
                sx, sy = x, y

            elif event == cv2.EVENT_LBUTTONUP:
                cv2.line(img, (sx, sy), (x, y), (255, 0, 0), 2)
                ex, ey = x, y

        # initialize line
        lp0 = (sx, sy)
        lp1 = (ex, ey)
        nlp0 = np.array([lp0[0], lp0[1]], float)
        nlp1 = np.array([lp1[0], lp1[1]], float)

        while (cap.isOpened()):
            count = 0
            try:
                ret, o_frame = cap.read()  #read a frame
                frame = cv2.resize(
                    o_frame, (o_frame.shape[1] // 2, o_frame.shape[0] // 2))

                #Use the subtractor
                fgmask = fgbg.apply(frame)  # kNN foreground mask of the current frame (grayscale)
                fgmask_o = fgmask.copy()

                fgmask = cv2.threshold(
                    fgmask, 244, 255,
                    cv2.THRESH_BINARY)[1]  # binarize the grayscale mask
                # kernel = np.ones((5,5), np.uint8)
                # fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)
                # cv2.dilate(input, kernel, iterations): morphological dilation
                fgmask = cv2.dilate(
                    fgmask,
                    cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)),
                    iterations=2)  # dilate the white regions twice with an elliptical kernel

                # cv2.findContours(input, mode, method): contour extraction
                im2, contours, hierarchy = cv2.findContours(
                    fgmask, cv2.RETR_EXTERNAL,
                    cv2.CHAIN_APPROX_SIMPLE)  # outermost contours only, with simplified vertex lists

                # initialize per-iteration variables
                new_center = np.empty(
                    (0, 2), float
                )  # np.empty can be slightly faster when the values need no zero/one initialization

                for c in contours:

                    if (itr % fps == 0
                        ):  # isn't this just skipping one pass per 15 reads, rather than processing once every 15 frames?
                        continue

                    # calc the area
                    cArea = cv2.contourArea(c)  # area enclosed by the contour
                    if cArea < 1000:  # if 1280x960 set to 50000, 640x480 set to 12500; skip contours that are too small
                        continue

                    # apply the convex hull
                    c = convHull(c)  # take the convex hull of the contour

                    # rectangle area
                    x, y, w, h = cv2.boundingRect(c)  # top-left point, width and height of the bounding rectangle
                    x, y, w, h = padding_position(x, y, w, h, 5)  # expand the rectangle by 5 on each side

                    # center point
                    cx, cy = centroidPL(c)  # centroid of the contour
                    new_point = np.array([cx, cy], float)
                    new_center = np.append(new_center,
                                           np.array([[cx, cy]]),
                                           axis=0)  # append the centroid as a new row

                    if (old_center.size > 1):
                        #print cArea and new center point
                        print('Loop: ' + str(itr) + '   Contours #: ' +
                              str(len(contours)))
                        print('New Center :' + str(cx) + ',' + str(cy))
                        #print 'New Center :' + str(new_center)

                        # calculate the nearest old center point
                        old_point_t = serchNN(new_point, old_center)

                        # check whether the old center point lies inside the bounding box
                        if (cv2.pointPolygonTest(
                                c,
                            (old_point_t[0], old_point_t[1]), True) > 0):
                            old_point = old_point_t
                            print('Old Center :' + str(int(old_point[0])) +
                                  ',' + str(int(old_point[1])))

                            # put line between old_center to new_center
                            cv2.line(frame,
                                     (int(old_point[0]), int(old_point[1])),
                                     (cx, cy), (0, 0, 255), 2)

                            count = 1

                    # put floating text
                    cv2.putText(frame, 'CA:' + str(cArea)[0:-2],
                                (x + 10, y + 20), font, 0.5, (255, 255, 255),
                                1, cv2.LINE_AA)

                    # draw center
                    cv2.circle(frame, (cx, cy), 5, (0, 0, 255), -1)

                    # draw rectangle or contour
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0),
                                  3)  #rectangle contour
                    # cv2.drawContours(frame, [c], 0, (0,255,0), 2)
                    # cv2.polylines(frame, [c], True, (0,255,0), 2)

                # put fixed text, line and show images
                if detected(count) == True:
                    cv2.putText(frame, 'Detect!',
                                ((o_frame.shape[1] // 6), 30), font, 1,
                                (255, 255, 255), 1, cv2.LINE_AA)
                    self.now_count = 1
                else:
                    self.now_count = 0
                cv2.line(frame, (lp0), (lp1), (255, 0, 0), 2)
                cv2.imshow('Frame', frame)
                cv2.imshow('Background Subtraction', fgmask_o)
                cv2.imshow('Contours', fgmask)

                # increase var number and renew center array
                old_center = new_center
                itr += 1

            except:
                #if there are no more frames to show...
                print('EOF')
                break

            #Abort and exit with 'Q' or ESC
            k = cv2.waitKey(30) & 0xff
            if k == 27:
                break

        cap.release()  #release video file
        cv2.destroyAllWindows()  #close all openCV windows
Example no. 10
    def detection_settings(
        self,
        skip=5,
        warmup=0,
        start_after=0,
        finish_after=0,
        history=60,
        threshold=10,
        detect_shadows=True,
        mode="MOG",
        methods=None,
        c_mask=False,
        c_mask_shape="rect",
        c_mask_size=50,
    ):

        """
        Set properties of the motion detection algorithm. Most settings can be 
        left at their default value.
        
        Parameters
        ----------
        skip: int, optional
            how many frames to skip between each capture
        warmup: int, optional
            warmup period in seconds for the background subtractor
        start_after: int, optional
            start after X seconds
        finish_after: int, optional
            finish after X seconds
        history: int, optional
            history length in seconds for the fg-bg subtraction algorithm 
            (converted to frames internally using fps / skip)
        threshold: int, optional
            sensitivity-level for fg-bg subtraction algorithm (lower = more 
            sensitive)
        detect_shadows: bool, optional
            attempt to detect shadows - will be returned as gray pixels
        mode: {"MOG", "KNN"} str, optional
            type of fg-bg subtraction algorithm
        methods: method or list of methods, optional
            list with tracking_method objects
        c_mask: bool, optional 
            consecutive masking. if multiple methods are defined, the objects 
            detected first will mask the objects detected in subsequent methods
        c_mask_shape: {"rect", "ellipse", "contour"} str, optional
            which shape should the consecutive mask have
        c_mask_size: int, optional
            area in pixels that is added around the mask
            
        """
        ## kwargs
        self.skip = skip
        self.warmup = warmup
        self.start = start_after
        self.finish = finish_after
        self.flag_detect_shadows = detect_shadows
        self.flag_consecutive = c_mask
        self.consecutive_shape = c_mask_shape
        self.consecutive_size = c_mask_size

        ## select background subtractor
        if mode == "MOG":
            self.fgbg_subtractor = cv2.createBackgroundSubtractorMOG2(
                int(history * (self.fps / self.skip)), threshold, self.flag_detect_shadows
            )
        elif mode == "KNN":
            self.fgbg_subtractor = cv2.createBackgroundSubtractorKNN(
                int(history * (self.fps / self.skip)), threshold, self.flag_detect_shadows
            )

        ## return settings of methods
        if methods is not None:
            if methods.__class__.__name__ == "tracking_method":
                methods = [methods]
            self.methods = methods
            for m in self.methods:
                m._print_settings()

        print("\n")
        print("--------------------------------------------------------------")
        print('Motion detection settings - "' + self.name + '":\n')
        print(
            "Background-subtractor: "
            + str(mode)
            + "\nHistory: "
            + str(history)
            + " seconds"
            + "\nSensitivity: "
            + str(threshold)
            + "\nRead every nth frame: "
            + str(self.skip)
            + "\nDetect shadows: "
            + str(self.flag_detect_shadows)
            + "\nStart after n seconds: "
            + str(self.start)
            + "\nFinish after n seconds: "
            + str(self.finish if self.finish > 0 else " - ")
        )
        print("--------------------------------------------------------------")
import numpy as np
import cv2
from shapely.ops import cascaded_union
import shapely.geometry as sg

cap = cv2.VideoCapture('C:/Users/Ben/Desktop/MeerkatTest/garcon_test.avi')
#cap = cv2.VideoCapture('C:/Users/Ben/Documents/OpenCV_HummingbirdsMotion/testing/PlotwatcherTest.TLV')

#cap=cv2.VideoCapture("F:/FieldWork2013/Santa Lucia 1/HDV_0316.mp4")

#fgbg5000 = cv2.createBackgroundSubtractorMOG2(500, 16,detectShadows = False)
fgbg1000 = cv2.createBackgroundSubtractorKNN()


for x in np.arange(0,1000):
    cap.grab()
    
def cont(orig,fram):
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(7,7))
    
    fr=fram.copy()
    morph=orig.copy()
    poly=orig.copy()
    
    width = np.size(fr, 1)
    height = np.size(fr, 0)
    
    # Now calculate movements using the white pixels as "motion" data
    _,contours,hierarchy = cv2.findContours(fr, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE )
                       
    top = 0
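    # hedged sketch (assumption, not the original): shapely's cascaded_union is
    # imported above but unused in the excerpt; presumably the contours were
    # merged into union polygons along these lines
    polys = [sg.Polygon(c.reshape(-1, 2)).buffer(1)
             for c in contours if len(c) >= 3]
    merged = cascaded_union(polys)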
# Background-model the video into black/white frames; only displayed, not saved to file, so a screen capture is needed
import cv2 as cv
import numpy as np

# load the video
cap = cv.VideoCapture(
    r"C:\Users\ningningbeibei33\Desktop\graduate\1\a\b\c\data\lift_video_10_2.mp4"
)
# create the background model
background_model = cv.createBackgroundSubtractorKNN()  # or use cv.createBackgroundSubtractorMOG2()
#background_model = cv.createBackgroundSubtractorMOG2()  # the original overwrote the KNN model here; commented out so KNN is actually used
# structuring element for the morphological operations
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))

fourcc1 = cv.VideoWriter_fourcc(*'XVID')
fps1 = cap.get(cv.CAP_PROP_FPS)
size1 = (int(cap.get(cv.CAP_PROP_FRAME_WIDTH)),
         int(cap.get(cv.CAP_PROP_FRAME_HEIGHT)))
out1 = cv.VideoWriter(
    "C:/Users/ningningbeibei33/Desktop/graduate/1/a/b/c/data/back_ground/BackgroundSubtractorKNN.avi",
    fourcc1, fps1, size1)
step = 0

while True:
    # read the next frame
    ret, frame = cap.read()
    if not ret:  # guard: frame is None once the video ends
        break
    # update the model and apply an opening to the predicted mask
    fgmask = background_model.apply(frame)
    fgmask = cv.morphologyEx(fgmask, cv.MORPH_OPEN, kernel)
    # contour filtering: people
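    # hedged addition (assumption, not in the original): out1 is already opened
    # above, so the mask could be saved by converting it to BGR first
    out1.write(cv.cvtColor(fgmask, cv.COLOR_GRAY2BGR))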
Example no. 13
def main():
    # ####upper and lower bounds for colors
    # yellow bounds from:
    # https://stackoverflow.com/questions/9179189/detect-yellow-color-in-opencv
    camera = cv2.VideoCapture(0)
    vs = WebcamVideoStream(src=0).start()

    lower_blue = np.array([110, 50, 50])
    upper_blue = np.array([130, 255, 255])
    lower_yellow = np.array([20, 100, 100])
    upper_yellow = np.array([30, 255, 255])
    grabBackground = 0
    fgbg = None
    reduction = None
    background = None
    grabbed = False
    while True:
        img_frame = vs.read()
        # set up img_frame for masking
        img_frame = imutils.resize(img_frame, width=900)
        img_frame = cv2.flip(img_frame, 1)
        blah = img_frame.copy()
        if (grabBackground <30):
            grabBackground += 1
        elif not grabbed:
            fgbg = cv2.createBackgroundSubtractorKNN()
            background = img_frame.copy()
            grabbed = True

        # apply the KNN subtractor and a manual background subtraction once the background is grabbed
        if fgbg is not None:
            img_frame = fgbg.apply(img_frame)
        if background is not None:
            res = cv2.subtract(background, blah)

        else: res = img_frame.copy()
        res = cv2.erode(res, None, iterations=1)
        res = cv2.dilate(res, None, iterations=1) 
        img_frame = cv2.erode(img_frame, None, iterations=2)
        img_frame = cv2.dilate(img_frame, None, iterations=2)  
        if grabbed:
            contours = cv2.findContours(img_frame.copy(), cv2.RETR_EXTERNAL,
                cv2.CHAIN_APPROX_SIMPLE)[-2]
            center = None

        # only proceed if at least one contour was found
        if grabbed and (len(contours) > 0):
            # find the largest contour in the mask, then use
            # it to compute the minimum enclosing circle and
            # centroid
            c = max(contours, key=cv2.contourArea)
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            M = cv2.moments(c)
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

            # only proceed if the radius meets a minimum size
            if radius > 10:
                # draw the circle and centroid on the img_frame,
                # then update the list of tracked points
                cv2.circle(img_frame, (int(x), int(y)), int(radius),
                    (255, 255, 255), 2)
                cv2.circle(img_frame, center, 5, (255, 255, 255), -1)


        cv2.imshow("other", res)
        cv2.imshow("Frame", img_frame)

        key = cv2.waitKey(1) & 0xFF

        # if the 'q' key is pressed, stop the loop
        if key == ord("q"):
            break
    # cleanup the camera and close any open windows
    camera.release()
    cv2.destroyAllWindows()
    vs.stop()
def main():

    number_of_watch_point = 4
    myrover = RoverSignFollower()
    myrover.turnLightsOn()
    signal.signal(signal.SIGINT, _signal_handler)
    print("Battery at " + str(myrover.getBatteryPercentage()) + "%"
          )  #Check battery status
    IMAGE_PATHS = [[] for _ in xrange(number_of_watch_point)]
    current_watch_point = 0
    time.sleep(2)
    sign_counter = 0
    break_point = 0
    while True:
        try:

            while True:
                x, y, w, h, image, cnt = myrover.detect_blue_sign(
                    myrover.getImageName())
                gap = myrover.compare_centroids('tmp.jpg', x, y, w, h)
                if abs(gap) > 10:  # Orient towards the center of the sign and move towards it.
                    # both original branches performed the same turn, so they collapse to one call
                    myrover.turn(gap * TURN_VALUE)
                if w * h < 29000:  # Keep moving till the sign is big enough to read the command.
                    myrover.moveForward(0.2, 0.5)
                    time.sleep(0.1)
                else:
                    x, y, w, h, action_image, action_cnt = myrover.detect_blue_sign(
                        myrover.getImageName())
                    gap = myrover.compare_centroids('tmp.jpg', x, y, w, h)
                    bs_img = action(x, y, w, h, action_image, action_cnt)
                    if np.count_nonzero(bs_img) > 300000:
                        IMAGE_PATHS[current_watch_point].append(bs_img)
                    break_point = break_point + 1
                    print(break_point)

                if break_point > 30:
                    break_point = 0
                    break

            fgbg = cv2.createBackgroundSubtractorKNN()
            fgbg2 = cv2.createBackgroundSubtractorMOG2()
            # Background subtraction
            for iter in range(len(IMAGE_PATHS[current_watch_point])):
                frame = IMAGE_PATHS[current_watch_point][iter]
                frame2 = frame.copy()
                #cv2.imshow('frame', frame)
                fgmask = fgbg.apply(frame)
                fgmask2 = fgbg2.apply(frame2)
                #cv2.imshow('frame with KNN', fgmask)
                #cv2.imshow('frame with MOG', fgmask2)

                alarm = np.count_nonzero(fgmask)
                if alarm > 4000:
                    img = cv2.imread("warning.jpg")
                    cv2.imshow("warning.jpg", img)

                    k = cv2.waitKey(30) & 0xff

                    if k == 27:
                        break

            print('shot')

            current_watch_point = (current_watch_point +
                                   1) % number_of_watch_point
            myrover.turn(165)
            time.sleep(2)

        except:
            traceback.print_exc()
            print("No Purple sign detected")
            #myrover.moveForward(0.2,-0.2) # If no blue sign detected, move forward a little and look again.
            sign_counter += 1
            if sign_counter > 3:
                sign_counter = 0
                myrover.moveForward(0.5, -0.5)
Example no. 15
import cv2
import numpy as np
bs = cv2.createBackgroundSubtractorKNN(detectShadows=False)
history = 30
bs.setHistory(history)
frames = 0
camera = cv2.VideoCapture("##.mp4")
count = 0

while True:
    ret, frame = camera.read()
    if ret == True:
        fgmask = bs.apply(frame)
        if frames < history:
            frames += 1
            continue
        print('Read a new frame: ', ret)

        th = cv2.threshold(fgmask.copy(), 244, 255, cv2.THRESH_BINARY)[1]
        th = cv2.erode(th,
                       cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)),
                       iterations=2)  # iterations must be an int; None raises a TypeError
        dilated = cv2.dilate(th,
                             cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                       (3, 3)),
                             iterations=2)
        contours, hier = cv2.findContours(dilated, cv2.RETR_EXTERNAL,
                                          cv2.CHAIN_APPROX_SIMPLE)
        for c in contours:
            area = cv2.contourArea(c)
            if area < 50:
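                continue
            # hedged continuation (assumption, not the original code): draw a
            # bounding box around each contour that survives the area filter
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)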
Example no. 16
    def run(self,
            images: np.ndarray,
            video_start_indices: list,
            load=False,
            apply_post=False,
            history=40,
            dist2Threshold=300,
            detectShadows=False):
        """
        Try loading the data
        If there is nothing to load, we have to manually go through the process

        :param images:
        :param video_start_indices:
        :return:
        """

        self.segmented_images = None
        self.logger.info(
            "Starting Background Subtraction on given Video Dataset...")

        if load:
            self.logger.info("Trying to load from saved file....")
            self._loadSegmentedImages()

        if self.segmented_images is None:

            # fgbg only takes grayscale images, we need to convert
            ## check if image is already converted to grayscale -> channels = 1
            if images.ndim > 3:
                self.logger.info(f"Data is not grayscale, converting....")
                images_gray = (np.mean(images, axis=3)).astype(np.uint8)

            else:
                self.logger.info("Data is grayscale!")
                images_gray = images

            segmented_images = np.ndarray(shape=images_gray.shape,
                                          dtype=np.uint8)
            for i in range(len(video_start_indices) - 1):
                # start index is inclusive, end index is not inclusive
                start_index = video_start_indices[i]
                end_index = video_start_indices[i + 1]
                self.logger.debug(
                    f"start index: {start_index}, end index: {end_index}")
                fgbg = cv2.createBackgroundSubtractorKNN(
                    history=history,
                    dist2Threshold=dist2Threshold,
                    detectShadows=detectShadows)

                # first round is to tune the values of the background subtractor
                for ii in range(start_index, end_index):
                    fgbg.apply(images_gray[ii])

                # second round is to extract the masked values
                for ii in range(start_index, end_index):
                    segmented_images[ii] = fgbg.apply(images_gray[ii])

                self.logger.debug(
                    f"Video {i} done! Processed {end_index - start_index} images"
                )

            if apply_post:
                self.logger.debug(
                    "Applying post additional computer vision methods to background subtracted images..."
                )
                self.segmented_images = self._postfgbg(segmented_images)
            else:
                self.segmented_images = segmented_images

        return self.segmented_images
tstart = 0.0
history = 400
dist2Threshold = 200.0
detectShadows = False
cx = 0
cy = 0
c = 0
x = 0
y = 0
dH = 0
dV = 0
thetaH = 14.68
thetaV = 10.5
Hpx = 640.0
Vpx = 480.0
fgbg = cv2.createBackgroundSubtractorKNN(history, dist2Threshold,
                                         detectShadows)
#fgbg = cv2.cudabgsegm.createBackgroundSubtractorMOG(history,nmixtures,backgroundRatio,noiseSigma)
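The thetaH/thetaV (field-of-view angles) and Hpx/Vpx (resolution) globals above suggest converting a blob centroid (cx, cy) into the angular offsets dH, dV; a sketch of the presumable conversion (the formula is an assumption inferred from the variable names):

# assumed pixel-to-angle conversion: offset from image center times degrees-per-pixel
dH = (cx - Hpx / 2.0) * (thetaH / Hpx)  # horizontal angle of the centroid
dV = (cy - Vpx / 2.0) * (thetaV / Vpx)  # vertical angle of the centroid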

# Small helper function to display opencv buffers via GStreamer.
#
# The display_buffers list holds references to the buffers we pass down to
# Gstreamer. This is required because otherwise python would free the memory
# before we displayed it and we do not want to copy the data to an allocated
# buffer.
display_buffers = []


def show_img(display_input, img):
    global display_buffers

    bytebuffer = img.tobytes()
Example no. 18
import cv2
import numpy as np

video_file = "Propellers_Video.avi"  #test video

kernel_dil = np.ones((20, 20), np.uint8)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

fgbg = cv2.createBackgroundSubtractorKNN()  #Background Subtraction function

cap = cv2.VideoCapture(video_file)

#cap = cv2.VideoCapture(0) for taking input from computer’s camera

while True:
    ret, frame = cap.read()
    if not ret:  # guard: frame is None once the video ends
        break

    fshape = frame.shape
    frame = frame[1:fshape[0] - 2, :fshape[1] - 2, :]  #cropping the video size
    #print(frame.shape)

    fgmask = fgbg.apply(frame)
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
    dilation = cv2.dilate(fgmask, kernel_dil, iterations=1)
    (contours, hierarchy) = cv2.findContours(dilation, cv2.RETR_TREE,
                                             cv2.CHAIN_APPROX_SIMPLE)

    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
Example no. 19
import cv2 as cv

cap = cv.VideoCapture(0) # 0 means 1st camera, 1 means 2nd cam
bgmask = cv.createBackgroundSubtractorKNN()
while True:
    ret, frame = cap.read()
    if not ret:
        print('camera not working')
        break
    mask = bgmask.apply(frame)
    cv.imshow('thats me',frame)
    cv.imshow('thats me masked',mask)
    if cv.waitKey(1) == 27:
        break
cap.release()
cv.destroyAllWindows()
Example no. 20
def main():
    cap = cv2.VideoCapture(1)
    fgbg = cv2.createBackgroundSubtractorKNN()
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    kernel_l = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 9))
    kernel_s = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    b = 7

    ret, bg = cap.read()
    gbg = cv2.cvtColor(bg, cv2.COLOR_BGR2GRAY)
    gbg_blur = cv2.GaussianBlur(gbg, (b, b), 0)
    bg_blur = cv2.GaussianBlur(bg, (b, b), 0)
    k = 0
    while (True):
        if k == 0:
            ret, bg = cap.read()
            gbg = cv2.cvtColor(bg, cv2.COLOR_BGR2GRAY)
            gbg_blur = cv2.GaussianBlur(gbg, (b, b), 0)
            bg_blur = cv2.GaussianBlur(bg, (b, b), 0)
        k = (k + 1) % 60

        ret, frame = cap.read()
        im = frame.copy()

        gim = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        gim_blur = cv2.GaussianBlur(gim, (b, b), 0)
        im_blur = cv2.GaussianBlur(im, (b, b), 0)

        res = cv2.absdiff(im_blur, bg_blur)
        gres = cv2.absdiff(gim_blur, gbg_blur)
        gres = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)  # note: this overwrites the grayscale absdiff computed on the previous line

        ret, mres = cv2.threshold(gres, 25, 255, cv2.THRESH_BINARY)

        for i in range(0, 3):
            ret, m = cv2.threshold(res[:, :, i], 25, 255, cv2.THRESH_BINARY)
            mres = cv2.max(mres, m)

        gres = cv2.cvtColor(gres, cv2.COLOR_GRAY2BGR)

        mres = cv2.cvtColor(mres, cv2.COLOR_GRAY2BGR)
        mres = cv2.morphologyEx(mres, cv2.MORPH_OPEN, kernel)
        mres = cv2.morphologyEx(mres, cv2.MORPH_CLOSE, kernel)

        fgmask = fgbg.apply(im)
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)
        fgmask = cv2.cvtColor(fgmask, cv2.COLOR_GRAY2BGR)
        # Display the resulting frame

        fg = im.astype(float)
        alpha = fgmask.astype(float) / 100
        fg = cv2.multiply(alpha, fg) / 255

        stacked = np.hstack((fg, mres))
        cv2.imshow('frame', stacked)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
def genetic(gen, child, attrib):
    name = "gen" + str(gen) + "," + str(child)
    cap = cv2.VideoCapture()
    #http://www.chart.state.md.us/video/video.php?feed=13015dbd01210075004d823633235daa
    #Use this until we find a better traffic camera
    cap.open('./highway/input/in%06d.jpg')

    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    #bg subtractor
    fgbg = cv2.createBackgroundSubtractorKNN()
    #parameters for blob detector
    params = cv2.SimpleBlobDetector_Params()
    params.minThreshold = attrib['minT']
    params.maxThreshold = attrib['maxT']
    params.filterByArea = True
    params.minArea = attrib['minA']
    params.maxArea = attrib['maxA']
    params.filterByConvexity = True
    params.minConvexity = attrib['minCov']
    detector = cv2.SimpleBlobDetector_create(params)

    cv2.namedWindow(name, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(name, w, h)

    confmat = np.zeros((2,2))
    out = 1
    score = float(0)
    while(cap.isOpened()):
        ret, frame = cap.read()
        if ret == True:
            if(confmat[0][1] != 0 and confmat[1][0] != 0):
                score = confmat[0][0]/float(confmat[0][0]+confmat[1][0])+\
                        confmat[0][0]/float(confmat[0][0]+confmat[0][1])
            #bg subtract
            img1 = fgbg.apply(frame)
            #white bg for bbox
            imgbbox = np.ones((240, 320), np.uint8) * 255
            #invert image
            cv2.bitwise_not(img1, img1)
            #gaussian
            img1 = cv2.GaussianBlur(img1,(attrib['filt'], attrib['filt']),
                                    attrib['sigma'])
            #blob detect
            points = detector.detect(img1)
            #draw bounding boxes
            for p in points:
                x1 = int(p.pt[0]-p.size/2)
                y1 = int(p.pt[1]-p.size/2)
                x2 = int(p.pt[0]+p.size/2)
                y2 = int(p.pt[1]+p.size/2)
                cv2.rectangle(img1,(x1,y1),(x2,y2),1)
                cv2.rectangle(imgbbox,(x1,y1),(x2,y2),(0,0,0),cv2.FILLED)

            #load gt in grayscale
            img_gt = cv2.imread("./highway/groundtruth/gt"+str(out).zfill(6)+".png", 0)
            cv2.bitwise_not(img_gt, img_gt)

            #everything in gt before 470 is blank
            if (out > 470):
                #comparing every 5x, 5y
                for x in range(0,len(img_gt),5):
                    for y in range(0,len(img_gt[x]),5):
                        if (img_gt[x][y] == 0 and imgbbox[x][y] == 0):
                            confmat[0][0] = confmat[0][0] + 1 #TP
                        elif (img_gt[x][y] != 0 and imgbbox[x][y] == 0):
                            confmat[0][1] = confmat[0][1] + 1 #FP
                        elif (img_gt[x][y] == 0 and imgbbox[x][y] != 0):
                            confmat[1][0] = confmat[1][0] + 1 #FN

            strinfo = 'Gen: %(gen)d, Child: %(ch)d' % {"gen": gen, "ch": child}
            cv2.putText(img1, strinfo, (5, 235), 0, 0.4, 0)
            strscore = 'Score: %(sc).5f' % {"sc": score}
            cv2.putText(img1, strscore, (5, 12), 0, 0.4, 0)
            cv2.imshow(name, img1)
            #cv2.waitKey(67) waits for 0.067 seconds making this ~15fps
            #Stop loop with "q"
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            out = out + 1
        else: break

    cap.release()
    #returns precision + recall
    return score
Example no. 22
parser = arg.ArgumentParser()
parser.add_argument('--inputPath',
                    help="path to input Video",
                    default='resources/MVI_9105.MOV')

args = parser.parse_args()

log.info("Video Path : '{}'".format(args.inputPath))

'-------------------------------------------------------------------------------------'
'Video Capture Init'
capture = cv.VideoCapture(args.inputPath)
capture2 = cv.VideoCapture(args.inputPath)
'Background subtraction init'
backSub = cv.createBackgroundSubtractorKNN(history=200,
                                           dist2Threshold=500.0,
                                           detectShadows=False)

'Get Video Properties'
frame_count = capture.get(cv.CAP_PROP_FRAME_COUNT)
frame_width = capture.get(cv.CAP_PROP_FRAME_WIDTH)
frame_height = capture.get(cv.CAP_PROP_FRAME_HEIGHT)
fps = capture.get(cv.CAP_PROP_FPS)
video_codec = capture.get(cv.CAP_PROP_FOURCC)
video_name = args.inputPath.split(".")[0].split("/")[1]

'Log Properties'
log.info("Total Number of Frames : '{}'".format(frame_count))
log.info("Frame width : '{}'".format(frame_width))
log.info("Frame height : '{}'".format(frame_height))
log.info("FPS : '{}'".format(fps))
Example no. 23
            info = x, y, vid.get(1)
            wr.writerow(info)
            # wr.writerow([position, vid.get(1)])
            resultFile.flush()
            print(position)

    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False

    if event == cv2.EVENT_MOUSEMOVE:
        posmouse = (x, y)


vid = cv2.VideoCapture(args['video'])
fgbg = cv2.createBackgroundSubtractorKNN(history=5000,
                                         dist2Threshold=225,
                                         detectShadows=False)

Nframes = vid.get(7)
print("Total number of frames = " + str(Nframes))

while (vid.isOpened()):
    ret, frame = vid.read()
    if not ret:  # guard: frame is None once the video ends
        break
    frame2 = cv2.resize(frame, (1440, 810))
    hsl = cv2.cvtColor(frame2, cv2.COLOR_BGR2HLS_FULL)
    one, two, three = cv2.split(hsl)
    blobs = fgbg.apply(two)
    blobs = cv2.erode(blobs, (3, 3), iterations=1)

    res = cv2.bitwise_or(blobs, two)
Example no. 24
def lines(image):
    # reconstructed function head: the excerpt began mid-call; the argument list
    # below matches cv2.HoughLinesP and the call site below is lines(cropped_image)
    lines = cv2.HoughLinesP(image,
                            2,
                            np.pi / 180,
                            100,
                            np.array([]),
                            minLineLength=20,
                            maxLineGap=5)
    return lines


def image_copy(image):
    lane_image = np.copy(image)
    cropped_image = region_of_intrest(lane_image)
    line_image = display_lines(lane_image, lines(cropped_image))
    combo_image = cv2.addWeighted(lane_image, 0.8, line_image, 1, 1)
    return combo_image


cap = cv2.VideoCapture('test6.mp4')

forback = cv2.createBackgroundSubtractorKNN()

while (cap.isOpened()):
    ret, frame = cap.read()
    if not ret:  # guard: frame is None once the video ends
        break
    fgmask = forback.apply(canny(frame))

    cv2.imshow('FG mask Frame', image_copy(fgmask))

    if cv2.waitKey(1) == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
if len(sys.argv) < 2:
  print('Usage: ./opencv_background_subtraction_play.py [VIDEO]')
  sys.exit(1)

video_capture = cv2.VideoCapture(sys.argv[1])
if not video_capture.isOpened():
  print('Video file could not be opened.')
  sys.exit(1)

# If the frame size is too large for the monitor, we can resize the window.
cv2.namedWindow('Window', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Window', int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)/2),
                 int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)/2))

num_frames = video_capture.get(cv2.CAP_PROP_FRAME_COUNT)
knn = cv2.createBackgroundSubtractorKNN()
frame_idx = 0
while frame_idx < num_frames:
  ret, frame = video_capture.read()
  frame_idx += 1

  if not ret:
    print('Frame %d could not be read properly.' % frame_idx)
    sys.exit(1)

  bgs_frame = knn.apply(frame, 0)

  cv2.putText(bgs_frame, str(frame_idx), (20, 100), cv2.FONT_HERSHEY_SIMPLEX, 2,
              (255, 255, 255))

  cv2.imshow('Window', bgs_frame)
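  # hedged continuation (assumption, not in the excerpt): without a waitKey call
  # the window never refreshes, so the loop presumably ended like this
  if cv2.waitKey(30) & 0xFF == ord('q'):
    break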
import cv2

#cap=cv2.VideoCapture('vtest.avi')
import sys
import video
try:
    fn = sys.argv[1]
except IndexError:
    fn = 0
cap = video.create_capture(fn)

#MOG MOG2 GMG KNN
#fgbg = cv2.createBackgroundSubtractorMOG2()
kernal = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
fgbg = cv2.createBackgroundSubtractorKNN(detectShadows=False)  # detectShadows expects a bool; the original passed None

while (cap.isOpened()):

    _, img = cap.read()

    frame = fgbg.apply(img)
    # frame = cv2.morphologyEx(frame,cv2.MORPH_OPEN
    #                         ,kernal)

    cv2.imshow('image', img)
    cv2.imshow('subtracted', frame)
    if cv2.waitKey(30) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
Example no. 27
    except AttributeError:
        warnings.warn(
            "BackgroundSubtractorMOG2 does not exist in your OpenCV {}".format(
                cv2.__version__))
    """
    BSubtractorKNN
    """
    params = np.linspace(1, 100, 20)
    x_mask = []
    print("\n\nBackgroundSubtractorKNN Method (from OpenCV {})".format(
        cv2.__version__))
    try:
        bg_substractor = cv2.BackgroundSubtractorKNN()
        for id, p in enumerate(params):
            bg_substractor = cv2.createBackgroundSubtractorKNN(
                history=0, dist2Threshold=p, detectShadows=False)
            x_mask.append(
                extractBackgroundSubtrasctor_CV(bg_substractor,
                                                data=input,
                                                im_show=False))
            sys.stdout.write("\r>  Computing ... {:.2f}%".format(
                (id + 1) * 100 / len(params)))
            sys.stdout.flush()

        print("\n")

        TP_list, FP_list, TN_list, FN_list = extractPerformance(
            gt, x_mask, array_params=params)
        precision_list, recall_list, fscore_list, accuracy_list = metrics(
            TP_list, FP_list, TN_list, FN_list, x_mask, array_params=params)
Example no. 28
def main(input_file='vtest.avi',
         heatmap_color=cv2.COLORMAP_BONE,
         color_label="cv2.COLORMAP_BONE"):
    print("Input source: " + input_file + " ", heatmap_color)

    cap = cv2.VideoCapture(input_file)
    #fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
    #fgbg = cv2.createBackgroundSubtractorMOG2()
    fgbg = cv2.createBackgroundSubtractorKNN()

    # Debug parameters
    fps = cap.get(cv2.CAP_PROP_FPS)  # Frames per second
    frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)  # Frame count
    frame_number = 0  # Frame number

    success, frame = cap.read()
    first_frame = copy.deepcopy(frame)
    height, width = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY).shape[:2]
    accum_image = np.zeros((height, width), np.uint8)
    while success and cap.isOpened():
        index = 0
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert to grayscale
        fgmask = fgbg.apply(gray)  # remove the background

        # for testing purposes, show the result of the background subtraction
        # cv2.imshow('diff-bkgnd-frame', fgmask)

        # apply a binary threshold only keeping pixels above thresh and setting the result to maxValue.  If you want
        # motion to be picked up more, increase the value of maxValue.  To pick up the least amount of motion over time, set maxValue = 1
        thresh = 2
        maxValue = 2
        ret, th1 = cv2.threshold(fgmask, thresh, maxValue, cv2.THRESH_BINARY)
        # for testing purposes, show the threshold image
        # cv2.imwrite('diff-th1.jpg', th1)

        # add to the accumulated image
        accum_image = cv2.add(accum_image, th1)
        # for testing purposes, show the accumulated image
        # cv2.imwrite('diff-accum.jpg', accum_image)

        # for testing purposes, control frame by frame
        # raw_input("press any key to continue")

        try:
            color_map_img = cv2.applyColorMap(accum_image, heatmap_color)
        except TypeError:
            print("Could not apply color: " + heatmap_color)
            return
        im0 = cv2.resize(color_map_img, (1000, 980))
        font_size, font_thickness = 0.7, 2
        text = color_label[4:]
        x, y, w, h = 15, 30, 275, 75
        # Draw black background rectangle
        cv2.rectangle(im0, (x, x), (x + w, y + h), (0, 0, 0), -1)
        im0 = cv2.putText(im0, text, (x + int(w / 10), y + int(h / 2)),
                          cv2.FONT_HERSHEY_SIMPLEX, font_size, (255, 55, 255),
                          font_thickness)

        cv2.namedWindow(Original_WIN)
        cv2.moveWindow(Original_WIN, 1080, 0)
        cv2.namedWindow(Heatmap_WIN)
        cv2.moveWindow(Heatmap_WIN, 20, 0)
        cv2.imshow(Heatmap_WIN, im0)
        cv2.imshow(Original_WIN, frame)

        key = cv2.waitKey(1) & 0xFF  # read the key once; calling waitKey twice can drop presses
        if key == ord('q'):
            break
        elif key == ord('k'):
            print("Terminated")
            #sys.exit(0)
        #print("Frame: ", frame_number)
        #print("FPS: ", fps)
        while frame_number < 1:
            os.system("xdotool getactivewindow windowmove -- -40 0")
            frame_number += 1
        frame_number += 1
        #cap.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
        success, frame = cap.read()

    # apply a color map
    print("writing to file")
    color_image = cv2.applyColorMap(accum_image, heatmap_color)
    # for testing purposes, show the colorMap image
    # cv2.imwrite('diff-color.jpg', color_image)

    # overlay the color mapped image to the first frame
    result_overlay = cv2.addWeighted(first_frame, 0.7, color_image, 0.7, 0)

    # save the final overlay image
    cv2.imwrite('diff-overlay.jpg', result_overlay)

    # cleanup
    cap.release()
    cv2.destroyAllWindows()
Example no. 29
parser.add_argument('--input',
                    type=str,
                    help='Path to a video or a sequence of image.',
                    default='vtest.avi')
parser.add_argument('--algo',
                    type=str,
                    help='Background subtraction method (KNN, MOG2).',
                    default='MOG2')
args = parser.parse_args()

## [create]
#create Background Subtractor objects
if args.algo == 'MOG2':
    backSub = cv.createBackgroundSubtractorMOG2()
else:
    backSub = cv.createBackgroundSubtractorKNN()
## [create]

## [capture]
capture = cv.VideoCapture(cv.samples.findFileOrKeep(args.input))
if not capture.isOpened():
    print('Unable to open: ' + args.input)
    exit(0)
## [capture]

while True:
    ret, frame = capture.read()
    if frame is None:
        break

    ## [apply]
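    # hedged continuation (assumption): the official tutorial's [apply] block
    # roughly continues like this
    fgMask = backSub.apply(frame)
    cv.imshow('Frame', frame)
    cv.imshow('FG Mask', fgMask)
    keyboard = cv.waitKey(30)
    if keyboard == ord('q') or keyboard == 27:
        break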
import numpy as np
import cv2
from PIL import ImageFont, ImageDraw, Image

def f(x): print(x)

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorKNN()
cv2.namedWindow('Mask')
cv2.createTrackbar('Learning rate', 'Mask', 0, 100, f)
cv2.createTrackbar('Shadow rate', 'Mask', 0, 127, f)
cv2.createTrackbar('Algorithm - 0:KNN 1:MOG2 2:CNT', 'Mask', 0, 2, f)
algorithmNumber = 0
algorithmName = 'KNN'

while(True):
    ret, frame = cap.read()
    learningRate = cv2.getTrackbarPos('Learning rate', 'Mask')
    shadow = cv2.getTrackbarPos('Shadow rate', 'Mask')
    algorithm = cv2.getTrackbarPos('Algorithm - 0:KNN 1:MOG2 2:CNT', 'Mask')

    if algorithmNumber != algorithm:
        if algorithm == 0:
            fgbg = cv2.createBackgroundSubtractorKNN()
            fgbg.setShadowValue(shadow)
            algorithmName = 'KNN'
        elif algorithm == 1:
            fgbg = cv2.createBackgroundSubtractorMOG2()
            fgbg.setShadowValue(shadow)
            algorithmName = 'MOG2'
        elif algorithm == 2:
Example no. 31
import cv2
import numpy as np
import math
from scipy.spatial.distance import pdist

video = cv2.VideoCapture('/Users/itaegyeong/Desktop/good.mov')
knn = cv2.createBackgroundSubtractorKNN()

# Compare with the previous frame: choose the detection whose w and h differ little
# and whose distance is smallest. Store each selection as the coordinates of
# players 1-5 and track them with a Kalman filter.


def distance(pre_frame, current_frame):

    min_num = []

    for pre in range(0, len(pre_frame)):

        min_dis = math.sqrt((pre_frame[pre][0] - current_frame[0][0]) *
                            (pre_frame[pre][0] - current_frame[0][0]) +
                            (pre_frame[pre][1] - current_frame[0][1]) *
                            (pre_frame[pre][1] - current_frame[0][1]))

        for curr in range(1, len(current_frame)):

            dis = math.sqrt((pre_frame[pre][0] - current_frame[curr][0]) *
                            (pre_frame[pre][0] - current_frame[curr][0]) +
                            (pre_frame[pre][1] - current_frame[curr][1]) *
                            (pre_frame[pre][1] - current_frame[curr][1]))

            if min_dis > dis:
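                min_dis = dis  # hedged completion (assumption): keep the running minimum

# hedged sketch (assumption, not the original): the note at the top says each
# selected player coordinate is then tracked with a Kalman filter; a minimal
# constant-velocity cv2.KalmanFilter for a single 2D point could look like this
def make_point_tracker():
    kf = cv2.KalmanFilter(4, 2)  # state (x, y, vx, vy), measurement (x, y)
    kf.measurementMatrix = np.array([[1, 0, 0, 0],
                                     [0, 1, 0, 0]], np.float32)
    kf.transitionMatrix = np.array([[1, 0, 1, 0],
                                    [0, 1, 0, 1],
                                    [0, 0, 1, 0],
                                    [0, 0, 0, 1]], np.float32)
    kf.processNoiseCov = np.eye(4, dtype=np.float32) * 0.03
    return kf
# per frame: kf.correct(np.array([[np.float32(x)], [np.float32(y)]])), then kf.predict()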
Example no. 32
#!/usr/bin/env python
import freenect
import cv2
import frame_convert2
import numpy as np
import math, time, io, sys
from collections import deque
from time import sleep
import gpiozero

### GLOBAL VARIABLES
kernel = np.ones((3, 3), np.uint8)  #small structuring element
kernel_big = np.ones((9, 9), np.uint8)  #big structuring element
backSub = cv2.createBackgroundSubtractorKNN()
backSub_depth = cv2.createBackgroundSubtractorKNN(history=100,
                                                  dist2Threshold=400.0,
                                                  detectShadows=True)
CACHE_SIZE = 4  # size of the list that stores previous distance values, must be 4 or greater
if CACHE_SIZE < 4: CACHE_SIZE = 4
pre_distances = deque(
    [10000] * CACHE_SIZE
)  # stores previous distances of the two biggest blobs to recognize valid movement
BLOB_MAX_SIZE = 40000
BLOB_MIN_SIZE = 1000
IMG_DEPTH = 0
IMG_RGB = 1
THRESHOLD = 814
DEPTH = 152
TIME_BETWEEN_FRAMES = .3  # good values for testing .3 (fast), 1 (slow)
RELAY_PIN = 21
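
# The rest of this script is not shown, so the sketch below is an assumption,
# not the author's code: a cache like pre_distances can flag "valid movement"
# when the blob distance keeps shrinking across the cached values.
def blobs_approaching(cache, new_distance):
    cache.popleft()                 # drop the oldest distance
    cache.append(new_distance)      # remember the newest one
    values = list(cache)
    return all(a > b for a, b in zip(values, values[1:]))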
Esempio n. 33
0
import numpy as np
import cv2
color = (12, 255, 148)  # bounding-box color
knn = cv2.createBackgroundSubtractorKNN(detectShadows=True)
# remove the static background
es = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
# elliptical structuring element
camera = cv2.VideoCapture('sWasteBasket.avi')


def drawCnt(fn, cnt):
    if cv2.contourArea(cnt) > 1400:
        #(x, y, w, h) = cv2.boundingRect(cnt)
        #cv2.rectangle(fn, (x, y), (x + w, y + h), color, 2)
        rect = cv2.minAreaRect(cnt)
        # minimum-area (rotated) rectangle around the contour
        box = cv2.boxPoints(rect)
        # the four vertices of that rectangle
        box = np.int64(box)
        # convert to integer coordinates
        cv2.drawContours(fn, [box], 0, color, 2)


framecount = 0
while True:
    ret, frame = camera.read()
    framecount += 1
    if not ret:
        # ret is False once the video has ended
        break
    frame = cv2.resize(frame, (480, 320))
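    # The loop body is truncated here. A plausible continuation, given the knn
    # subtractor, the elliptical kernel es and drawCnt defined above (this is
    # our sketch, not the original code):
    fg = knn.apply(frame)
    fg = cv2.morphologyEx(fg, cv2.MORPH_OPEN, es)    # suppress small noise
    contours, _ = cv2.findContours(fg, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)   # OpenCV 4 signature
    for c in contours:
        drawCnt(frame, c)
    cv2.imshow('detection', frame)
    if cv2.waitKey(1) & 0xFF == 27:
        break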
Esempio n. 34
0
 def __init__(self):
     self.backgroundSubtractor = cv2.createBackgroundSubtractorKNN(
         history=500, dist2Threshold=400, detectShadows=True)
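# history, dist2Threshold and detectShadows are the KNN subtractor's three
# constructor parameters. A minimal usage sketch for a wrapper like the one
# above (the method name is ours):
 def foreground_mask(self, frame, learning_rate=-1):
     # learningRate=-1 lets OpenCV choose the update rate automatically
     return self.backgroundSubtractor.apply(frame, learningRate=learning_rate)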
Esempio n. 35
0
def main():
  global color, roiPoints, inputMode, itter

  cv2.namedWindow('color')
  cv2.setMouseCallback('color', selectROI)
  cv2.createTrackbar('tb1', 'color', 1, 10, handleTrackbar)

  termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1)
  roiBox = None
  quit = False
  tracker = cv2.Tracker_create('TLD')
  tracker_init = False
  tracker_box = (263.0,462.0,98,51)

  fgbg = cv2.createBackgroundSubtractorKNN(history=500, dist2Threshold=400, detectShadows=True)
  firstFrame = None

  while True:
    frames = listener.waitForNewFrame()
    color = frames["color"]
    ir = frames["ir"]
    depth = frames["depth"]

    kernel = np.ones((5,5),np.uint8)

    registration.apply(color, depth, undistorted, registered)
    color = cv2.resize(color.asarray(), (int(1920/2), int(1080/2)))
    hsv = cv2.cvtColor(color, cv2.COLOR_BGR2HSV)
    fgmask = fgbg.apply(hsv)

    gray = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    if firstFrame is None:
      firstFrame = gray

    # frameDelta = cv2.absdiff(firstFrame, gray)
    # thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    # thresh = cv2.dilate(thresh, None, iterations=10)
    #
    # _, cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    #
    # for c in cnts:
    #   if cv2.contourArea(c) < 500:
    #     continue
    #   (cx, cy, cw, ch) = cv2.boundingRect(c)
    #   cv2.rectangle(color, (cx, cy), (cx+cw, cy+ch), (0,0,255), 2)

    if roiBox is not None:
      backProj = cv2.calcBackProject([fgmask], [0], roiHist, [0, 180], 1)
      if not tracker_init:

        tracker.init(hsv, (roiBox[0], roiBox[1], roiBox[2] - roiBox[0], roiBox[3] - roiBox[1]))
        tracker_init = True
      (r, roiBox) = cv2.CamShift(backProj, roiBox, termination)
      (r2, tracker_box) = tracker.update(hsv)

      p1 = (int(tracker_box[0]), int(tracker_box[1]))
      p2 = (int(tracker_box[0] + tracker_box[2]), int(tracker_box[1] + tracker_box[3]))
      cv2.rectangle(hsv, p1, p2, (250,0,0), 2)
      cv2.rectangle(color, p1, p2, (250,0,0), 2)

      pts = np.int0(cv2.boxPoints(r))

      cv2.polylines(color, [pts], True, (0, 255, 0), 2)
      cv2.polylines(hsv, [pts], True, (0, 255, 0), 2)

    cv2.imshow('hsv', hsv)
    cv2.imshow('color', color)
    cv2.imshow('frame', fgmask)
    key = cv2.waitKey(1) & 0xFF

    if key == ord("i") and len(roiPoints) < 4:
      inputMode = True
      while len(roiPoints) < 4:
        cv2.imshow("color", color)
        cv2.waitKey(0)
      roiPoints = np.array(roiPoints)
      s = roiPoints.sum(axis=1)
      tl = roiPoints[np.argmin(s)]
      br = roiPoints[np.argmax(s)]

      # take the ROI from the BGR image, then convert it to HSV once
      roi = color[tl[1]:br[1], tl[0]:br[0]]
      roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
      roiHist = cv2.calcHist([roi], [0], None, [16], [0, 180])
      roiHist = cv2.normalize(roiHist, roiHist, 0, 255, cv2.NORM_MINMAX)
      roiBox = (tl[0], tl[1], br[0], br[1])

    elif key == ord('q'):
      quit = True

    listener.release(frames)
    if quit:
      break
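
# Note: cv2.Tracker_create('TLD') above is the OpenCV 3.0-3.2 API. On newer
# builds the per-algorithm factories apply instead:
#   OpenCV 3.3-4.4 (opencv-contrib):   tracker = cv2.TrackerTLD_create()
#   OpenCV >= 4.5.1 (opencv-contrib):  tracker = cv2.legacy.TrackerTLD_create()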
# interactive-session history: pull a few frames out of the video reader
for i in [1, 50, 100, 150, 200, 250]:
    code = '''avg%s = vid.get_data(%s)''' % (str(i), i)
    exec(code)
    
from skimage import io
io.imshow(avg1-avg100)
io.imshow(avg1+avg100)
io.imshow(avg1+avg100+avg150)
io.imshow(avg1+avg100+avg150+avg200)
cv2.accumulateWeighted(vid.get_data(10), avg1, 1)
cv2.accumulateWeighted(vid.get_data(102), avg1, 1)
io.imshow(avg1-avg100)
io.imshow((avg1 - avg100)[::-1])   # reversed() on an array is not displayable; flip instead
io.imshow((avg1+avg100))
fgbg= cv2.createBackgroundSubtractorKNN()
fgmask = fgbg.apply(avg1)
io.imshow(fgmask)
fgbg= cv2.createBackgroundSubtractorMOG2()
fgmask = fgbg.apply(avg1)
io.imshow(fgmask)
fgbg= cv2.createBackgroundSubtractorMOG2(1000)
fgbg.getBackgroundImage()
io.imshow(fgbg.getBackgroundImage())
io.imshow(fgmask)
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()   # plain MOG lives in opencv-contrib's bgsegm module
fgbg = cv2.createBackgroundSubtractorMOG2()
fgbg.apply(avg1, avg100)
a = fgbg.apply(avg1, avg100)
io.imshow(a)
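
# The history above is groping toward a running-average background model; a
# cleaned-up sketch of that technique (webcam index and alpha are assumptions):
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
ret, frame = cap.read()
avg = np.float32(frame)                        # the accumulator must be float

while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.accumulateWeighted(frame, avg, 0.05)   # alpha: background adaptation speed
    background = cv2.convertScaleAbs(avg)
    cv2.imshow('foreground', cv2.absdiff(frame, background))
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()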
    def __init__(self, *args):
        # self.video = "data/vtest.avi"
        # self._camera = cv2.VideoCapture(self.video)
        self._camera = cv2.VideoCapture(0)  # argument 0 selects the first camera
        # give the camera a few seconds to warm up
        time.sleep(3)

        # check whether the camera opened
        if self._camera.isOpened():
            print('Open')
        else:
            print('Camera failed to open')

        self._vedioWidth = int(self._camera.get(cv2.CAP_PROP_FRAME_WIDTH))
        self._vedioHeight = int(self._camera.get(cv2.CAP_PROP_FRAME_HEIGHT))

        self._showSize = 400
        # self._vedioWidth = int(self._showSize)
        # self._vedioHeight = 300 # int(self._showSize)
        self._locationX = 0
        self._locationY = 0
        self._locationHeight = 0
        self._locationWidth = 0
        self._locationXOld = 0
        self._locationYOld = 0
        self._findTargetNumber = 0
        self._isOutputResult = False
        self._outputFile = None
        self._outputFileName = None
        self._outputFileExcel = None
        self._outputFileNameExcel = None

        print("vedio size", self._vedioWidth, self._vedioHeight)

        # object detection
        self._motionDetected = False
        self._debugMode = False
        self._bsknn = cv2.createBackgroundSubtractorKNN(detectShadows=True)
        self._isKnnDetect = False
        self._findTargetList = []
        self._findTargetListOld = []
        self._lineList = []

        self._startDetectTime = 0
        self._endDetectTime = 0
        self._sameFindTargetNumber = 0
        self._targetquen = deque()
        self._targetNewIndexList = []

        # output data
        self._outputData = []
        self._outputDataHeaders = ('Time', 'Description', 'Count', 'Position X',
                                   'Position Y', 'Width', 'Height',
                                   'Speed dx', 'Speed dy')
        self._outputDataFile = tablib.Dataset(*self._outputData,
                                              headers=self._outputDataHeaders)

        # load the main window
        super(MyWindow, self).__init__(*args)

        # loadUi("mainwindow.ui", self)  # load the UI via uic's loadUi
        self.ui = Ui_mainWindow()
        self.ui.setupUi(self)

        # self.ui.label_ShowImg.resize(self._vedioWidth, self._vedioHeight)
        self.setWindowIcon(QIcon("./icon.png"))
        # fix the window size
        # self.setFixedHeight(380)
        # self.setFixedWidth(640)

        # display the monitoring video
        self.timer = QTimer(self)
        self.count = 0

        self.timer.timeout.connect(self.objectTrackingKNN)
        self.startCount()

        # wire up signals and slots
        self.ui.pushButtonBeginDetection.clicked.connect(self.beginDetection)
        self.ui.pushButtonEndDetection.clicked.connect(self.endDetection)
        self.ui.pushButtonOutputData.clicked.connect(self.outputData)
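
    # The QTimer above calls self.objectTrackingKNN on every timeout; that
    # method is not shown. A hypothetical sketch of such a callback using the
    # _bsknn subtractor (names and thresholds below are ours):
    def objectTrackingKNN(self):
        ret, frame = self._camera.read()
        if not ret:
            return
        mask = self._bsknn.apply(frame)
        # drop shadow pixels (gray 127); keep confident foreground (255)
        mask = cv2.threshold(mask, 244, 255, cv2.THRESH_BINARY)[1]
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
        self._findTargetList = [cv2.boundingRect(c) for c in contours
                                if cv2.contourArea(c) > 500]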
Esempio n. 38
0
def test_KNN(video, output):
    cap = cv2.VideoCapture(video)
    fgbg = cv2.createBackgroundSubtractorKNN(detectShadows=True)
    history = 0
    fgbg.setHistory(history)

    # 1. Get the video frame rate, size and codec:
    fps = cap.get(cv2.CAP_PROP_FPS)  # OpenCV 3 dropped the cv.CV_ prefix
    size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    codec = (cap.get(cv2.CAP_PROP_FOURCC))

    print(fps, size, codec)

    # 2. Choose the output format (I420 for .avi, MJPG for .mp4)
    fourcc = cv2.VideoWriter_fourcc(*'DIVX')
    videoWriter = cv2.VideoWriter(output, fourcc, fps, size)

    # 3. Record the frame numbers of interest (for question 4):
    frame_list = []
    frames = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            # cv2.waitKey(1000 // int(fps))
            fgmask = fgbg.apply(frame)  # , learningRate=0.01

            if frames < history:  # initial wait while the history builds
                frames += 1
                continue

            # threshold, erode and dilate the mask to remove noise
            th = cv2.threshold(fgmask.copy(), 244, 255, cv2.THRESH_BINARY)[1]
            th = cv2.erode(th,
                           cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                     (3, 3)),
                           iterations=2)
            dilated = cv2.dilate(th,
                                 cv2.getStructuringElement(
                                     cv2.MORPH_ELLIPSE, (8, 3)),
                                 iterations=2)
            # collect all detection contours
            image, contours, hier = cv2.findContours(dilated,
                                                     cv2.RETR_EXTERNAL,
                                                     cv2.CHAIN_APPROX_SIMPLE)

            # get the current frame number:
            frame_now = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
            if len(contours) >= 2:
                print('frame_now:', frame_now, len(contours))
                frame_list.append(frame_now)

            # save the processed frame; convert the single-channel mask to BGR
            # so the color VideoWriter accepts it
            videoWriter.write(cv2.cvtColor(image, cv2.COLOR_GRAY2BGR))

            cv2.imshow('frame', fgmask)

            cv2.imshow("back", image)

            k = cv2.waitKey(3000) & 0xff
            if k == 27:
                break
        else:
            print('finished.')
            break
    print(frame_list)
    cap.release()
    videoWriter.release()
    cv2.destroyAllWindows()
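
# Why the 244 threshold works: with detectShadows=True the KNN subtractor
# writes shadow pixels as gray (127 by default) and confident foreground as
# 255, so thresholding just below 255 discards shadows in one step:
#
#     _, fg_only = cv2.threshold(fgmask, 244, 255, cv2.THRESH_BINARY)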
Esempio n. 39
0
        fgmask = fgbg.apply(frame)
        result = cv2.bitwise_and(frame,frame,mask=fgmask)
    elif mode == 3:
        beta = 0.9
        fgmask = fgbg.apply(frame)
        result = cv2.bitwise_and(bg,bg,mask=fgmask)
        result = cv2.addWeighted(bg,beta,result,1-beta, 0.0)
    return result
    
if __name__ == '__main__':
    # grab a capture from the webcam
    cap = cv2.VideoCapture(1)
    global bg
    
    #fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=False)
    fgbg = cv2.createBackgroundSubtractorKNN(detectShadows=False)
    mode = 0
    
    while(True):
        # key handling (waitKey returns an int, so compare against ord())
        c = cv2.waitKey(1)
        if c == ord('m'):       # manga style
            mode = 1
        elif c == ord('f'):     # foreground extraction
            mode = 2
        elif c == ord('o'):     # original image
            mode = 0
        elif c == ord('t'):     # invisible-man mode
            mode = 3
            _, bg = cap.read()
        elif c == ord('q') or c == 27:     # quit
            break  # assumed: the snippet is truncated at this branch
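
        # The loop is truncated here; presumably it reads a frame, runs the
        # mode dispatcher whose tail appears above, and shows the result.
        # A sketch (the dispatcher's real name is not shown; we call it
        # apply_mode):
        ret, frame = cap.read()
        if not ret:
            break
        cv2.imshow('result', apply_mode(frame, mode))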
Esempio n. 40
0
pts = deque(maxlen=100000)
posx = deque(maxlen=constant.DECK)
posy = deque(maxlen=constant.DECK)

counter = 0
(dX, dY) = (0, 0)
direction = ""

startTime = datetime.now()

cv2.destroyAllWindows()

# From warp.py
fgbg1 = cv2.createBackgroundSubtractorMOG2(history=5000, varThreshold=20)
fgbg2 = cv2.createBackgroundSubtractorMOG2(history=5000, varThreshold=100)
fgbg3 = cv2.createBackgroundSubtractorKNN(history=5000, dist2Threshold=250)

for_er = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, constant.ERODE)
for_di = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, constant.DILATE)
# for_di1 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

# out = cv2.VideoWriter("Uca_detection.avi",
#                       cv2.VideoWriter_fourcc("M", "J", "P", "G"), 24, (464, 464))

if constant.MANUAL_ANNOTATION:
    try:
        name = video_name + "_" + str(
            input("* Please enter name for this individual: "))
        # species = str(input("* Please enter species name for this individual: "))
        # sex = str(input("* Please enter sex for this individual: "))
        # handedness = str(input(" *Please enter handedness for this individual: "))
 def start_expt(self):  
     if self.use_arduino:
         self.expt_timestring = time.strftime("%Y-%m-%d") + " " + time.strftime("%H.%M.%S") + " " + '- {} Hz {} Pulse width'.format(self.led_freq, self.led_dur)
     else:
         self.expt_timestring = time.strftime("%Y-%m-%d") + " " + time.strftime("%H.%M.%S")
     #create new directory for the data we are about to generate
     self.save_dir = '{}'.format(os.path.abspath(os.path.join(self.default_save_dir,self.expt_timestring)))
     if not os.path.isdir(self.save_dir):
         os.makedirs(self.save_dir)  
         
     self.parent_conn.send('Time:{}'.format(self.expt_timestring))
     time.sleep(0.25)
     self.parent_conn.send('Start!')
     #give a bit of time for the child process to get started
     time.sleep(0.25)
     
     # Implement a K-Nearest Neighbors background subtraction
     # Most efficient when number of foreground pixels is low (and image area is small)
     # So we will create one background subtractor for each ROI
     # positional args: (history=5, dist2Threshold=300, detectShadows=False)
     self.bg_sub_dict = {roi_name:cv2.createBackgroundSubtractorKNN(5,300,False) for roi_name in self.roi_list}
     
     prev_time_stamp = 0        
     self.max_q_size = 0               
     #set up a dictionary of lists for analysis results
     self.plotting_dict = {}
     #set up a dictionary of deques for plotting
     self.results_dict = {}      
     for roi_name in self.roi_list:
         self.results_dict[roi_name] = list()
         self.plotting_dict[roi_name] = deque(maxlen=100)
         
     #initialize matplotlib plots for raw group activity
     act_fig, act_axes = self.init_activity_plots()      
     #do an initial subplot background save
     backgs = [ax.figure.canvas.copy_from_bbox(ax.bbox) for ax in chain(*act_axes)]
     lns = [ax.plot([],[])[0] for ax in chain(*act_axes)]        
            
     msg = None
     
     #Python "dot" loop optimization:
     #see: https://wiki.python.org/moin/PythonSpeed/PerformanceTips
     if hasattr(self, 'expt_conn_obj'):
         expt_conn_obj_poll = self.expt_conn_obj.poll
         expt_conn_obj_recv = self.expt_conn_obj.recv
     data_q_get = self.data_q.get
     np_ndarray = np.ndarray
     data_q_qsize = self.data_q.qsize
     sys_stdout_flush = sys.stdout.flush
     get_activity_counts = self.get_activity_counts
     bg_sub_dict = self.bg_sub_dict
     roi_dict  = self.roi_dict
     roi_list = self.roi_list    
     show_tracking = self.show_tracking
     update_plots = self.update_plots
     
     #profiler.enable()
    
     while True:           
         if hasattr(self, 'expt_conn_obj'):
             if expt_conn_obj_poll():
                 msg = expt_conn_obj_recv() 
             if msg == 'Shutdown!':
                 self.shutdown_expt_manager()
                 
         time_stamp, frame, stim_bool = data_q_get()   
         
         #check if the experiment data collection has completed
         if type(frame) == str:
             if frame == 'stop':
                 #let's close everything down
                 cv2.destroyAllWindows()
                 #clean up the expt control process
                 self.data_q.close()
                 self.data_q.join_thread()
                 self.child_conn.close()
                 self.parent_conn.close()
                 self.control_expt_process.terminate()
                 break
         
         elif type(frame) == np_ndarray:                
             #print frame.dtype, frame.size
             #print (time_stamp, stim_bool)            
             fps = 1/(time_stamp-prev_time_stamp)
             prev_time_stamp = time_stamp           
             print('Lagged frames: {} fps: {}'.format(int(data_q_qsize()),fps))
             sys_stdout_flush()
             
             if data_q_qsize() > self.max_q_size:
                 self.max_q_size = data_q_qsize()
     
             #order of result sublists should be ['line1', 'line2', 'roi1', 'roi2', 'roi3', 'roi4']   
             results = [get_activity_counts(roi_name, bg_sub_dict[roi_name], frame, roi_dict[roi_name]) for roi_name in roi_list]           
             roi_counts, roi_frames = zip(*results)     
                            
             for roi_indx, roi_name in enumerate(roi_list):
                 #append roi_counts to the results dictionary
                 self.results_dict[roi_name].append([time_stamp, roi_counts[roi_indx], stim_bool])
                 #append roi_counts to the plotting deque
                 self.plotting_dict[roi_name].append([time_stamp, roi_counts[roi_indx]])
             
             #only display every 3rd tracked frame
             #Results in massive speedup
             if update_plots.calls % 3 == 0:
                 show_tracking(roi_frames)
                                    
             #resave the backgrounds with drawn data but only if update_plots has been called near the plotting_dict deque length
             if update_plots.calls % 99 == 0:
                 backgs = [ax.figure.canvas.copy_from_bbox(ax.bbox) for ax in chain(*act_axes)]           
             update_plots(act_axes, lns, backgs)
             
     #profiler.disable()
     #profiler.dump_stats(os.path.join(desktop_path,"Stats.dmp"))
     
     #update plots one more time after experiment loop has finished so user can see overall activity results
     update_plots(act_axes,lns,backgs)
  
     #Okay, we've finished analyzing all the data. Time to save it out.
     if self.write_csv:
         import csv
         results_keys = sorted(self.results_dict.keys())
         
         for key in results_keys:        
             with open("{}/{}-{}.csv".format(self.save_dir, self.expt_timestring, key), "w", newline="") as outfile:
                 writer = csv.writer(outfile)
                 writer.writerow(["Time Elapsed (sec)", "Number of active flies", "Stimulation"])
                 writer.writerows(self.results_dict[key])                
         print("CSVs written to data folder!")
     else:
         print("Experiment is complete! Ready for the next one!")
Esempio n. 42
0
# Imports assumed by this excerpt (its header is not shown); MultiTracker and
# the display() thread target are project-local and not reproduced here:
import time
import cv2
import numpy as np
from queue import Queue
from threading import Thread
from imutils.video import FileVideoStream, FPS
import MultiTracker

fps = FPS()  # assumed: the fps object used below comes from imutils

def main():
    counter = 0
    dq = Queue()
    displayInput = Thread(target=display, args=(dq, ))
    displayInput.daemon = True
    displayInput.start()

    mt = MultiTracker.MultiTracker()

    cap = FileVideoStream('fish7.mp4').start()
    time.sleep(1.0)

    frame = cap.read()
    vh, vw = frame.shape[:2]

    subtractor = cv2.createBackgroundSubtractorKNN(history=50,
                                                   dist2Threshold=120,
                                                   detectShadows=False)
    # subtractor = cv2.createBackgroundSubtractorMOG2(history=120, varThreshold=100, detectShadows=False)

    fps.start()

    while cap.more():
        # frame = cv2.flip(frame, 1)
        small_frame = cv2.resize(frame,
                                 dsize=None,
                                 dst=None,
                                 fx=(50 / 100),
                                 fy=(50 / 100),
                                 interpolation=cv2.INTER_LANCZOS4)
        filtered_frame = small_frame.copy()
        trackers = mt.update(small_frame)
        if len(trackers) > 0:
            for tracker in trackers:
                c, bbox = tracker

                offset = 0
                xy1 = (int(bbox[0]) - offset, int(bbox[1]) - offset)
                xy2 = (int(bbox[2]) + offset, int(bbox[3]) + offset)

                cv2.rectangle(small_frame, xy1,
                              ((xy1[0] + xy2[0]), (xy1[1] + xy2[1])),
                              (0, 0, 255), 1)
                cv2.putText(small_frame, 'id: {}'.format(c),
                            (xy1[0], xy1[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1,
                            (0, 0, 0), 1, cv2.LINE_AA)
                cv2.rectangle(filtered_frame, xy1,
                              ((xy1[0] + xy2[0]), (xy1[1] + xy2[1])),
                              (255, 255, 255), -1)

        grayFrame = cv2.cvtColor(filtered_frame, cv2.COLOR_BGR2GRAY)
        # cv2.imshow('gray', grayFrame)
        grayFrame = cv2.GaussianBlur(grayFrame, (5, 5), 0)

        mask = subtractor.apply(grayFrame)
        mask = cv2.morphologyEx(mask,
                                cv2.MORPH_OPEN,
                                np.ones((5, 5), np.uint8),
                                iterations=2)
        mask = cv2.morphologyEx(mask,
                                cv2.MORPH_CLOSE,
                                np.ones((5, 5), np.uint8),
                                iterations=2)
        mask = cv2.bitwise_and(grayFrame, grayFrame, mask=mask)
        # H = cv2.Sobel(mask, cv2.CV_8U, 0, 1)
        # V = cv2.Sobel(mask, cv2.CV_8U, 1, 0)
        # mask = H + V
        cv2.imshow('mask', mask)
        cv2.waitKey(30)
        _, threshold = cv2.threshold(mask, 25, 150, 0)
        contours, hierarchy = cv2.findContours(threshold, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)
        for contour in contours:
            x, y, w, h = cv2.boundingRect(contour)

            if x >= int(vw / 4) and x <= int((vw / 4) + 3):
                counter += 1
                mt.add(counter, small_frame, (int(x), int(y), int(w), int(h)))
            else:
                small_frame = cv2.rectangle(small_frame, (x, y),
                                            (x + w, y + h), (0, 0, 255),
                                            cv2.FILLED)

        small_frame = cv2.resize(small_frame,
                                 dsize=None,
                                 dst=None,
                                 fx=(2),
                                 fy=(2),
                                 interpolation=cv2.INTER_LANCZOS4)

        fps.stop()
        fps.update()
        # print(fps.fps())
        cv2.imshow('t', small_frame)
        cv2.waitKey(0)

        dq.put(small_frame)
        frame = cap.read()

    print('Main thread done, waiting all thread queue to be done')
    dq.join()
    print('ENDING')
    cap.stop()
Esempio n. 43
0
               ['finger', 'up', 'moveup'], ['finger', 'down', 'movedown'],
               ['finger', 'clockwise', 'zoomin'],
               ['finger', 'anticlockwise', 'zoomout'],
               ['palm', 'left', 'turnleft'], ['palm', 'right', 'turnright'],
               ['palm', 'up', 'close'], ['palm', 'down', 'return'],
               ['palm', 'clockwise', 'ok'],
               ['palm', 'anticlockwise', 'cancel']]

# Hand-shape recognition data: [hand-area ratio range, aspect-ratio range of
# the hand's minimal bounding image, hand-shape name]
handdata = []

# used inside the videoProcessing function
handTrackLen = 15  # length of the tracked hand-motion trajectory
handTrack = list([(0, 0)] * handTrackLen)  # circular list of trajectory coordinate tuples
hPoint = 0  # current position pointer into handTrack
conHandTrackLen = 8  # the final trajectory is built from this many consecutive judgments; affects sensitivity, must not exceed handTrackLen
conHandTrack = list(['static'] * conHandTrackLen)  # circular list of trajectory-recognition results
handShapes = list([None] * conHandTrackLen)  # circular list of hand shapes, used to smooth recognition
tPoint = 0  # current position pointer into conHandTrack

# Background model for hand segmentation. If history is set too small, a
# pausing hand is absorbed into the background and detection suffers.
frameBackGround = cv2.createBackgroundSubtractorKNN(history=500,
                                                    detectShadows=False)

TrackingHand = None  # hand object currently being tracked (Kalman filtered)

# For parameter tuning and debugging =============
myTrackBar = None  # trackbar, mainly for tuning color parameters
myPlot = None  # plotting tool, mainly for inspecting color histograms while tuning
# ================================================
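
# A sketch of how a circular list plus pointer like handTrack/hPoint is
# typically advanced; the real update code lives in videoProcessing, which is
# not shown, so this is an assumption:
def push_track_point(point):
    global hPoint
    handTrack[hPoint] = point              # overwrite the oldest slot
    hPoint = (hPoint + 1) % handTrackLen   # advance the circular pointer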
import freenect
import cv2
import frame_convert2
import numpy as np
import math, time, io, sys
from collections import deque
from time import sleep
import gpiozero

### GLOBAL VARIABLES
KERNEL = np.ones((3, 3), np.uint8)
KERNEL_BIG = np.ones((9, 9), np.uint8)
# Create two independent background subtractors, because the RGB and depth images may need different parameters:
# NOTE: ADAPT THE RGB SUBTRACTOR PARAMETERS TO THE LOCATION WHERE YOU SET UP THE KINECT FOR BEST RECOGNITION:
backSubDepth = cv2.createBackgroundSubtractorKNN(history=10000,
                                                 dist2Threshold=50,
                                                 detectShadows=False)
backSubRgb = cv2.createBackgroundSubtractorKNN(
    history=10000, dist2Threshold=400,
    detectShadows=True)  # dist2Threshold=400 is the OpenCV default
#backSub = cv2.createBackgroundSubtractorMOG2() # performed worse than KNN
CACHE_SIZE = 4  # size of the list that stores previous distance values, must be 4 or greater
if CACHE_SIZE < 4: CACHE_SIZE = 4
pre_distances = deque(
    [10000] * CACHE_SIZE
)  # stores previous distances of the two biggest blobs to recognize valid movement
BLOB_MAX_SIZE = 40000
BLOB_MIN_SIZE = 3000
IMG_DEPTH = 0
IMG_RGB = 1
THRESHOLD = 814
Esempio n. 45
0
File: knn.py Progetto: DukasGuo/pycv
import cv2
import numpy as np

knn = cv2.createBackgroundSubtractorKNN(detectShadows = True)
es = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20,12))
camera = cv2.VideoCapture("/home/d3athmast3r/Downloads/traffic.flv")

def drawCnt(fn, cnt):
  if cv2.contourArea(cnt) > 1400:
    (x, y, w, h) = cv2.boundingRect(cnt)
    cv2.rectangle(fn, (x, y), (x + w, y + h), (255, 255, 0), 2)

while True:
  ret, frame = camera.read()
  if not ret:
    break
  fg = knn.apply(frame.copy())
  fg_bgr = cv2.cvtColor(fg, cv2.COLOR_GRAY2BGR)
  bw_and = cv2.bitwise_and(fg_bgr, frame)
  draw = cv2.cvtColor(bw_and, cv2.COLOR_BGR2GRAY)
  draw = cv2.GaussianBlur(draw, (21, 21), 0)
  draw = cv2.threshold(draw, 10, 255, cv2.THRESH_BINARY)[1]
  draw = cv2.dilate(draw, es, iterations = 2)
  image, contours, hierarchy = cv2.findContours(draw.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
  for c in contours:
    drawCnt(frame, c)
  cv2.imshow("motion detection", frame)
  if cv2.waitKey(1000 // 12) & 0xff == ord("q"):
      break

camera.release()
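
# Converting the mask to BGR just to AND it with the frame works, but the same
# masking is usually written with the mask parameter, skipping a conversion:
#
#     bw_and = cv2.bitwise_and(frame, frame, mask=fg)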
Esempio n. 46
0
File: Video.py Progetto: n17r4m/mpyx
        def __init__(self, window_size=None, img_shape=None):

            import cv2

            self.fgbg = cv2.createBackgroundSubtractorKNN(detectShadows=True)
            self.kern = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
Esempio n. 47
0
def main():
  # camera = cv2.VideoCapture(path.join(path.dirname(__file__), "traffic.flv"))
  camera = cv2.VideoCapture(path.join(path.dirname(__file__), "768x576.avi"))
  # camera = cv2.VideoCapture(path.join(path.dirname(__file__), "..", "movie.mpg"))
  # camera = cv2.VideoCapture(0)
  history = 20
  # KNN background subtractor
  bs = cv2.createBackgroundSubtractorKNN()

  # MOG subtractor
  # bs = cv2.bgsegm.createBackgroundSubtractorMOG(history = history)
  # bs.setHistory(history)

  # GMG
  # bs = cv2.bgsegm.createBackgroundSubtractorGMG(initializationFrames = history)
  
  cv2.namedWindow("surveillance")
  pedestrians = {}
  firstFrame = True
  frames = 0
  fourcc = cv2.VideoWriter_fourcc(*'XVID')
  # writer size must match the frames written below (the source is 768x576)
  out = cv2.VideoWriter('output.avi', fourcc, 20.0, (768, 576))
  while True:
    print " -------------------- FRAME %d --------------------" % frames
    grabbed, frame = camera.read()
    if not grabbed:
      print("failed to grab frame.")
      break

    fgmask = bs.apply(frame)

    # this is just to let the background subtractor build a bit of history
    if frames < history:
      frames += 1
      continue


    th = cv2.threshold(fgmask.copy(), 127, 255, cv2.THRESH_BINARY)[1]
    th = cv2.erode(th, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3)), iterations = 2)
    dilated = cv2.dilate(th, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8,3)), iterations = 2)
    image, contours, hier = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    counter = 0
    for c in contours:
      if cv2.contourArea(c) > 500:
        (x,y,w,h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x,y), (x+w, y+h), (0, 255, 0), 1)
        # only create pedestrians in the first frame, then just follow the ones you have
        if firstFrame:
          pedestrians[counter] = Pedestrian(counter, frame, (x,y,w,h))
        counter += 1
    

    for i, p in pedestrians.items():
      p.update(frame)
    
    firstFrame = False
    frames += 1

    cv2.imshow("surveillance", frame)
    out.write(frame)
    if cv2.waitKey(110) & 0xff == 27:
        break
  out.release()
  camera.release()
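
# The Pedestrian class is defined elsewhere in this project; in the example
# this excerpt derives from, it wraps a per-subject Kalman filter. A minimal
# stand-in so the snippet runs (our sketch, not the original class):
class Pedestrian:
  def __init__(self, pid, frame, rect):
    self.id = pid
    x, y, w, h = rect
    self.center = (x + w // 2, y + h // 2)

  def update(self, frame):
    # draw the last known center and id; the real class would predict and
    # correct a Kalman filter here before drawing
    cv2.circle(frame, self.center, 4, (0, 255, 0), -1)
    cv2.putText(frame, str(self.id), self.center,
                cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)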
Esempio n. 48
0
rh = statistics.mode(rightheight)

avgw = (lw+rw)/2
avgh = (lh+rh)/2

print(int(avgw),int(avgh))
# t = int(time.time())*1000
# print(time.time()*1000)
# Going Through Video again


# Reading Video
cap = cv2.VideoCapture('2.mp4')

# Background Subtraction
mask = cv2.createBackgroundSubtractorKNN(history=1, dist2Threshold=15, detectShadows=False)
# Making matrix for Erosion, dilation and morphing
kernel = np.ones((2, 2), np.uint8)
kernel1 = np.ones((1, 2), np.uint8)

it = 0
countL = 0
countR = 0
pLx=0
pLy=0
pRx=0
pRy=0
pLw=0
pRw=0
pLh=0
pRh=0