Example #1
class App:
    def __init__(self, video_src, paused=False):
        self.cap = video.create_capture(video_src)
        _, self.frame = self.cap.read()
        cv2.imshow('frame', self.frame)
        self.rect_sel = RectSelector('frame', self.onrect)
        self.trackers = []
        self.paused = paused


    def nextimage(self):
        while True:
            if not self.paused:
                ret, self.frame = self.cap.read()
                if not ret:
                    break
                frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
                for tracker in self.trackers:
                    tracker.update(frame_gray)

            vis = self.frame.copy()
            for tracker in self.trackers:
                tracker.draw_state(vis)
            if len(self.trackers) > 0:
                cv2.imshow('tracker state', self.trackers[-1].state_vis)
            self.rect_sel.draw(vis)

            cv2.imshow('frame', vis)
            ch = cv2.waitKey(10)
            if ch == 27:
                break
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == ord('c'):
                self.trackers = []
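Example #1 shows only the constructor and the display loop; the onrect callback it registers with RectSelector is not part of the snippet. A minimal sketch of what that callback looks like in the other examples below (e.g. Example #11), assuming cv2 and the MOSSE class from OpenCV's mosse.py sample are already imported by the module:

    def onrect(self, rect):
        # Seed a new MOSSE tracker on the user-selected rectangle of the
        # current frame (grayscale, as the trackers above expect).
        frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        self.trackers.append(MOSSE(frame_gray, rect))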
Example #2
 def __init__(self,
              outputDest,
              trackAlgorithm='mosse',
              drawKp=False,
              doClassify=False,
              doActRecog=True,
              outFps=30,
              retrain=False,
              matchEdges=False,
              filterVelocity=True):
     self.tracks = []
     self.trackCount = 0
     self.doClassify = doClassify
     self.doActRecog = doActRecog
     self.drawKeypoints = drawKp
     self.trackAlgorithm = trackAlgorithm
     self.vCubeDimens = (75, 100, 45)
     self.outFps = outFps
     self.doTrackWrite = False
     self.rect_sel = RectSelector('frame', self.onrect)
     self.lastFrame = None
     if self.doClassify:
         self.classifier = ObjectClassifier()
     if self.doActRecog:
         self.actionRecognizer = ActionRecognize(
             'actions/',
             self.vCubeDimens,
             matchEdges=matchEdges,
             retrain=retrain,
             filterVelocity=filterVelocity)
     self.outputFile = open(outputDest, 'w')
     self.frameCount = 0
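Example #2 is only the constructor of a higher-level tracker manager; the full class appears below as Example #30. A hypothetical instantiation (the output path is an assumption, the keyword names come from the signature above):

tracker_mgr = ObjectTracker('tracks.txt', trackAlgorithm='mosse',
                            doClassify=False, doActRecog=False)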
Example #3
 def __init__(self, video_src, paused=False):
     self.cap = video.create_capture(video_src)
     _, self.frame = self.cap.read()
     cv2.imshow('frame', self.frame)
     self.rect_sel = RectSelector('frame', self.onrect)
     self.trackers = []
     self.paused = paused
Example #4
class App:
    def Open(self):
        self.paused = False
        cv.namedWindow(titleWindow)
        self.rect_sel = RectSelector(titleWindow, self.onrect)
        self.trackers = []
        PyStreamRun(self.OpenCVCode, titleWindow)

    def onrect(self, rect):
        frame_gray = cv.cvtColor(self.frame, cv.COLOR_BGR2GRAY)
        tracker = MOSSE(frame_gray, rect)
        self.trackers.append(tracker)

    def OpenCVCode(self, imgRGB, depth32f, frameCount):
        self.frame = imgRGB.copy()
        if not self.paused:
            frame_gray = cv.cvtColor(self.frame, cv.COLOR_BGR2GRAY)
            for tracker in self.trackers:
                tracker.update(frame_gray)

        for tracker in self.trackers:
            tracker.draw_state(imgRGB)
        if len(self.trackers) > 0:
            cv.imshow('tracker state', self.trackers[-1].state_vis)
        self.rect_sel.draw(imgRGB)

        cv.imshow(titleWindow, imgRGB)
        ch = cv.waitKey(10)
        if ch == ord(' '):
            self.paused = not self.paused
        if ch == ord('c'):
            self.trackers = []
        return imgRGB, None
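Example #4 wires the same tracking logic into a callback-driven framework: instead of owning the capture loop, OpenCVCode is registered via PyStreamRun and returns the annotated image for each incoming frame. A minimal hypothetical entry point (titleWindow is assumed to be a module-level window name; PyStreamRun's exact behavior is not shown in the snippet):

titleWindow = 'MOSSE tracking'   # assumption: module-level window title

if __name__ == '__main__':
    App().Open()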
Example #5
 def __init__(self, cap, paused=False):
     self.cap = cap
     self.frame = self.cap.read()
     print(self.frame.shape)
     cv2.imshow('frame', self.frame)
     self.rect_sel = RectSelector('frame', self.onrect)
     self.trackers = []
     self.paused = paused
Example #6
 def __init__(self, video_src, paused=False):
     self.cap = video.create_capture(video_src)
     _, self.frame = self.cap.read()
     cv2.imshow('frame', self.frame)
     self.rect_sel = RectSelector('frame', self.onrect)
     self.trackers = []  # Named "trackers" (plural) and kept in a list because the MOSSE sample supports multi-target tracking: additional targets are tracked simply by appending more trackers.
     self.paused = paused
Example #7
 def __init__(self, video_src, paused=False):
     #self.cap = video.create_capture(video_src)
     self.cap = VideoCapture(
         "rtsp://*****:*****@192.168.1.64:554/Streaming/Channels/102"
     )
     _, self.frame = self.cap.read()
     cv.imshow('frame', self.frame)
     self.rect_sel = RectSelector('frame', self.onrect)
     self.trackers = []
     self.paused = paused
Example #8
 def __init__(self, video_src, paused=False):
     self.cap = video.create_capture(video_src)
     _, self.frame = self.cap.read()
     # self.out is the output video writer
     fourcc = cv2.VideoWriter_fourcc(*'XVID')
     self.out = cv2.VideoWriter('output.avi', fourcc, 30,
                                (self.frame.shape[1], self.frame.shape[0]))
     cv2.imshow('frame', self.frame)
     # For manually selecting objects to track. Not using, but not removing either.
     self.rect_sel = RectSelector('frame', self.onrect)
     self.trackers = []
     self.paused = paused
Example #9
 def __init__(self, video_src, paused=False):
     self.cap = video.create_capture(video_src)
     _, self.frame = self.cap.read()
     # self.video_src = video_src
     # self.frames = os.listdir(video_src)
     # print self.frames
     # self.frame = cv2.imread(os.path.join(video_src, self.frames[0]))
     print('FrameShape: ', self.frame.shape)
     cv2.imshow('Video', self.frame)
     self.rect_sel = RectSelector('Video', self.onrect)
     self.paused = paused
     self.trackers = []
Example #10
class App:
    def __init__(self, video_src, paused=False):
        self.cap = video.create_capture(video_src)
        _, self.frame = self.cap.read()
        # self.video_src = video_src
        # self.frames = os.listdir(video_src)
        # print self.frames
        # self.frame = cv2.imread(os.path.join(video_src, self.frames[0]))
        print('FrameShape: ', self.frame.shape)
        cv2.imshow('Video', self.frame)
        self.rect_sel = RectSelector('Video', self.onrect)
        self.paused = paused
        self.trackers = []

    def onrect(self, rect):
        frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        tracker = STC(frame_gray, rect)
        self.trackers.append(tracker)  # multi-target

    def run(self):
        i = 0
        while True:
            if not self.paused:
                ret, self.frame = self.cap.read()
                # self.frame = cv2.imread(os.path.join(self.video_src, self.frames[i]))
                # i += 1
                if not ret:
                    print('Failed to grab frame!')
                    break
                frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
                for tracker in self.trackers:
                    tracker.updata_tracker(frame_gray)

            vis = self.frame.copy()
            self.rect_sel.draw(vis)

            for tracker in self.trackers:
                tracker.draw_state(vis)

            cv2.imshow('Video', vis)

            ch = cv2.waitKey(1)
            if ch == 27:
                cv2.destroyAllWindows()
                # self.cap.release()
                break
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == ord('c'):
                self.trackers = []
                pass
Example #11
class App:
    def __init__(self, video_src, paused=False):
        self.cap = video.create_capture(video_src)
        _, self.frame = self.cap.read()
        cv2.imshow('frame', self.frame)
        self.rect_sel = RectSelector('frame', self.onrect)
        self.trackers = []
        self.paused = paused

    def onrect(self, rect):
        frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        tracker = MOSSE(frame_gray, rect)
        self.trackers.append(tracker)

    def run(self):
        while True:
            if not self.paused:
                ret, self.frame = self.cap.read()
                if not ret:
                    break
                frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
                for tracker in self.trackers:
                    tracker.update(frame_gray)

            vis = self.frame.copy()
            for tracker in self.trackers:
                tracker.draw_state(vis)
            if len(self.trackers) > 0:
                cv2.imshow('tracker state', self.trackers[-1].state_vis)
            self.rect_sel.draw(vis)

            cv2.imshow('frame', vis)
            ch = cv2.waitKey(10)
            if ch == 27:
                break
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == ord('c'):
                self.trackers = []

            if len(self.trackers) > 0:
                if ch == ord('s'):
                    self.trackers[-1].saveCenter()
                if ch == ord('a'):
                    self.trackers[-1].saveLeft()
                if ch == ord('d'):
                    self.trackers[-1].saveRight()
                if ch == ord('w'):
                    self.trackers[-1].saveUp()
                if ch == ord('x'):
                    self.trackers[-1].saveDown()
Example #12
    def __init__(self, cap, paused=False):
        self.cap = cap
        _, self.frame = self.cap.read()
        self.croph, self.cropw = 1080, 1920
        self.frameh, self.framew, _ = self.frame.shape
        self.original = self.frame
        # use integer division so cv2.resize receives integer dimensions
        self.frame = imutils.resize(self.frame,
                                    width=self.framew // 2,
                                    height=self.framew // 2)

        cv2.imshow('frame', self.frame)

        self.rect_sel = RectSelector('frame', self.onrect)
        self.trackers = []
        self.paused = paused
Example #13
class App:
    def __init__(self, cap, paused=False):
        self.cap = cap
        self.frame = self.cap.read()
        print(self.frame.shape)
        cv2.imshow('frame', self.frame)
        self.rect_sel = RectSelector('frame', self.onrect)
        self.trackers = []
        self.paused = paused

    def onrect(self, rect):
        frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        tracker = MOSSE(frame_gray, rect)
        self.trackers.append(tracker)

    def run(self):
        # Define the codec and create VideoWriter object
        #fourcc = cv2.VideoWriter_fourcc(*'DIVX')
        #the spec is for my webcam, the IP camera is 1920x1080
        #out = cv2.VideoWriter('output.avi',fourcc, 30.0, (1920,1080))

        while True:
            if not self.paused:
                self.frame = self.cap.read()
                frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
                for tracker in self.trackers:
                    tracker.update(frame_gray)

            vis = self.frame.copy()
            for tracker in self.trackers:
                tracker.draw_state(vis)
            if len(self.trackers) > 0:
                cv2.imshow('tracker state', self.trackers[-1].state_vis)
            self.rect_sel.draw(vis)

            cv2.imshow('frame', vis)
            #out.write(vis)

            ch = cv2.waitKey(10) & 0xFF
            if ch == 27:
                #out.release()
                cv2.destroyAllWindows()
                print("released out")
                break
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == ord('c'):
                self.trackers = []
Example #14
 def __init__(self, video_src, paused=False):
     self.cap = video.create_capture(video_src)
     _, self.frame = self.cap.read()
     cv2.imshow('frame', self.frame)
     self.rect_sel = RectSelector('frame', self.onrect)
     self.trackers = []
     self.paused = paused
Example #15
 def __init__(self, video_src, robotq, appq, launchspeed, swatted):
     self.cap = video.create_capture(video_src)
     _, self.frame = self.cap.read()
     cv2.namedWindow('frame')
     self.row = 0
     self.bounceshot = 0
     cv2.createTrackbar('row', 'frame', 0, 2, self.onrow)
     cv2.createTrackbar('speed', 'frame', 0, 512, self.onspeed)
     cv2.createTrackbar('bounceshot', 'frame', 0, 1, self.onbounceshot)
     cv2.imshow('frame', self.frame)
     self.rect_sel = RectSelector('frame', self.onrect)
     self.trackers = []
     self.robotq = robotq
     self.appq = appq
     self.launchspeed = launchspeed
     self.swatted = swatted
Example #16
class App:
    def __init__(self, video_src, paused=False):
        self.cap = video.create_capture(video_src)
        _, self.frame = self.cap.read()
        cv2.imshow('frame', self.frame)
        self.rect_sel = RectSelector('frame', self.onrect)
        self.trackers = []
        self.paused = paused

    def onrect(self, rect):
        frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        tracker = MOSSE(frame_gray, rect)
        self.trackers.append(tracker)

    def run(self):
        while True:
            if not self.paused:
                ret, self.frame = self.cap.read()
                if not ret:
                    break
                frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
                for tracker in self.trackers:
                    tracker.update(frame_gray)

            vis = self.frame.copy()
            for tracker in self.trackers:
                tracker.draw_state(vis)
            if len(self.trackers) > 0:
                cv2.imshow('tracker state', self.trackers[-1].state_vis)
            self.rect_sel.draw(vis)

            cv2.imshow('frame', vis)
            ch = cv2.waitKey(10)
            if ch == 27:
                break
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == ord('c'):
                self.trackers = []
            #print("hi:%s" % self.hi())
            try:
                talker()
            except rospy.ROSInterruptException:
                pass
Example #17
class App:
    def __init__(self, video_src, paused=False):
        print("app init entered", paused)
        self.cap = video.create_capture(video_src)
        _, self.frame = self.cap.read()
        cv2.imshow('frame', self.frame)
        self.rect_sel = RectSelector('frame', self.onrect)
        self.trackers = []
        self.paused = paused

    def onrect(self, rect):
        print "onrect enterd"
        frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        tracker = MOSSE(frame_gray, rect)
        self.trackers.append(tracker)

    def run(self):
        while True:
            if not self.paused:
                ret, self.frame = self.cap.read()
                if not ret:
                    break
                frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
                for tracker in self.trackers:
                    tracker.update(frame_gray)

            vis = self.frame.copy()
            for tracker in self.trackers:
                tracker.draw_state(vis)
            if len(self.trackers) > 0:
                cv2.imshow('tracker state', self.trackers[-1].state_vis)
            self.rect_sel.draw(vis)

            cv2.imshow('frame', vis)
            ch = cv2.waitKey(10)
            if ch == 27:
                break
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == ord('c'):
                self.trackers = []

        self.cap.release()
        cv2.destroyAllWindows()
Example #18
class App:
    def __init__(self, video_src, paused=False):
        self.cap = video.create_capture(video_src)
        _, self.frame = self.cap.read()
        cv2.imshow('frame', self.frame)
        self.rect_sel = RectSelector('frame', self.onrect)
        self.trackers = []  # Named "trackers" (plural) and kept in a list because the MOSSE sample supports multi-target tracking: additional targets are tracked simply by appending more trackers.
        self.paused = paused

    def onrect(self, rect):
        frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        tracker = MOSSE(frame_gray, rect)
        self.trackers.append(tracker)

    def run(self):
        while True:
            if not self.paused:
                ret, self.frame = self.cap.read()
                if not ret:
                    break
                frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
                # Update each tracker in trackers with the current (grayscale) frame.
                for tracker in self.trackers:
                    tracker.update(frame_gray)

            vis = self.frame.copy()
            # Draw each tracker's result (its bounding box) on the visualization image.
            for tracker in self.trackers:
                tracker.draw_state(vis)
            # Show the most recently added tracker's state in a separate small window (the image inside its box; the MOSSE sample shows it at the original box size, though it could arguably be enlarged, e.g. 2x, before display).
            if len(self.trackers) > 0:
                cv2.imshow('tracker state', self.trackers[-1].state_vis)
            self.rect_sel.draw(vis)

            cv2.imshow('frame', vis)
            # Measured in practice: the MOSSE sample defaults waitKey to 10 and tracking is already fast; lowering this to 1, combined with CMT skipping its global search, should keep things comfortably real-time.
            ch = cv2.waitKey(10)
            if ch == 27:
                break
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == ord('c'):
                self.trackers = []
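The comment above weighs cv2.waitKey(10) against waitKey(1) for real-time tracking. A small optional helper for checking where the per-frame time actually goes (an illustration added here, not part of the example):

import cv2

def time_call(fn, *args):
    # Time one call, e.g. time_call(tracker.update, frame_gray),
    # and return (result, elapsed time in milliseconds).
    t0 = cv2.getTickCount()
    result = fn(*args)
    dt_ms = (cv2.getTickCount() - t0) / cv2.getTickFrequency() * 1000.0
    return result, dt_ms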
Example #19
class App:
    def __init__(self, video_src, paused=False):
        print "app init entered", paused
        self.cap = video.create_capture(video_src)
        _, self.frame = self.cap.read()
        cv2.imshow('frame', self.frame)
        self.rect_sel = RectSelector('frame', self.onrect)
        self.trackers = []
        self.paused = paused

    def onrect(self, rect):
        print "onrect enterd"
        frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        tracker = MOSSE(frame_gray, rect)
        self.trackers.append(tracker)

    def run(self):
        while True:
            if not self.paused:
                ret, self.frame = self.cap.read()
                if not ret:
                    break
                frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
                for tracker in self.trackers:
                    tracker.update(frame_gray)

            vis = self.frame.copy()
            for tracker in self.trackers:
                tracker.draw_state(vis)
            if len(self.trackers) > 0:
                cv2.imshow('tracker state', self.trackers[-1].state_vis)
            self.rect_sel.draw(vis)

            cv2.imshow('frame', vis)
            ch = cv2.waitKey(10)
            if ch == 27:
                break
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == ord('c'):
                self.trackers = []

        self.cap.release()
        cv2.destroyAllWindows()
Example #20
class App:
    InterruptedFlag = 0

    def __init__(self, video_src, paused=False):
        self.cap = cv2.VideoCapture(video_src)
        _, self.frame = self.cap.read()
        cv2.imshow('frame', self.frame)
        self.rect_sel = RectSelector('frame', self.onrect)
        self.trackers = []
        self.paused = paused

    def onrect(self, rect):
        frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        tracker = MOSSE(frame_gray, rect)
        self.trackers.append(tracker)

    def run(self):
        while True:
            if not self.paused:
                ret, self.frame = self.cap.read()
                # print self.frame.shape
                self.frame = cv2.flip(self.frame, 1)
                if not ret:
                    break
                frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
                for tracker in self.trackers:
                    tracker.update(frame_gray)

            vis = self.frame.copy()
            for tracker in self.trackers:
                tracker.draw_state(vis)
            self.rect_sel.draw(vis)

            cv2.imshow('frame', vis)
            ch = cv2.waitKey(10) & 0xFF
            if ch == ord('q'):
                App.InterruptedFlag = 0
                break
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == ord('c'):
                self.trackers = []
        cv2.destroyAllWindows()
        return
Example #21
class App:
    def __init__(self, video_src, paused=False):
        #self.cap = video.create_capture(video_src)
        self.cap = VideoCapture(
            "rtsp://*****:*****@192.168.1.64:554/Streaming/Channels/102"
        )
        _, self.frame = self.cap.read()
        cv.imshow('frame', self.frame)
        self.rect_sel = RectSelector('frame', self.onrect)
        self.trackers = []
        self.paused = paused

    def onrect(self, rect):
        frame_gray = cv.cvtColor(self.frame, cv.COLOR_BGR2GRAY)
        tracker = MOSSE(frame_gray, rect)
        self.trackers.append(tracker)

    def run(self):
        while True:
            if not self.paused:
                ret, self.frame = self.cap.read()
                if not ret:
                    break
                frame_gray = cv.cvtColor(self.frame, cv.COLOR_BGR2GRAY)
                for tracker in self.trackers:
                    tracker.update(frame_gray)

            vis = self.frame.copy()
            for tracker in self.trackers:
                tracker.draw_state(vis)
            if len(self.trackers) > 0:
                cv.imshow('tracker state', self.trackers[-1].state_vis)
            self.rect_sel.draw(vis)

            cv.imshow('frame', vis)
            ch = cv.waitKey(10)
            if ch == 27:
                break
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == ord('c'):
                self.trackers = []
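Example #21 (like Example #7) reads from an IP camera over RTSP rather than a local file or webcam. A minimal sketch of opening such a stream with plain cv2 and failing fast when it cannot be read (the URL format in the comment is a placeholder; the error handling is an assumption, not from the example):

import cv2

def open_rtsp(url):
    # e.g. url = 'rtsp://<user>:<password>@<camera-ip>:554/Streaming/Channels/102'
    cap = cv2.VideoCapture(url)
    if not cap.isOpened():
        raise IOError('could not open RTSP stream: ' + url)
    return cap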
Example #22
class App:
    def __init__(self, video_src, paused=False):
        self.cap = video.create_capture(video_src)
        _, self.frame = self.cap.read()
        cv2.imshow("frame", self.frame)
        self.rect_sel = RectSelector("frame", self.onrect)
        self.trackers = []
        self.paused = paused

    def onrect(self, rect):
        frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        tracker = MOSSE(frame_gray, rect)
        self.trackers.append(tracker)

    def run(self):
        while True:
            if not self.paused:
                ret, self.frame = self.cap.read()
                if not ret:
                    break
                frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
                for tracker in self.trackers:
                    tracker.update(frame_gray)

            vis = self.frame.copy()
            for tracker in self.trackers:
                tracker.draw_state(vis)
            if len(self.trackers) > 0:
                cv2.imshow("tracker state", self.trackers[-1].state_vis)
            self.rect_sel.draw(vis)

            cv2.imshow("frame", vis)
            ch = cv2.waitKey(10) & 0xFF
            if ch == 27:
                break
            if ch == ord(" "):
                self.paused = not self.paused
            if ch == ord("c"):
                self.trackers = []
Example #23
class Capture:
    def __init__(self, video_src, paused=False):
        self.cap = video.create_capture(video_src)
        _, self.frame = self.cap.read()
        cv2.imshow('frame', self.frame)
        self.rect_sel = RectSelector('frame', self.onrect)
        self.trackers = []
        self.paused = paused

    def onrect(self, rect):
        frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        tracker = Tracker(frame_gray, rect)
        self.trackers.append(tracker)

    def run(self):
        if not self.paused:
            ret, self.frame = self.cap.read()
            if not ret:
                return
            frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
            for tracker in self.trackers:
                tracker.update(frame_gray)

        vis = self.frame.copy()
        for tracker in self.trackers:
            tracker.draw_state(vis)
        if len(self.trackers) > 0:
            cv2.imshow('tracker state', self.trackers[-1].state_vis)
        self.rect_sel.draw(vis)

        cv2.imshow('frame', vis)
        ch = cv2.waitKey(10) & 0xFF
        if ch == 27:
            return
        if ch == ord(' '):
            self.paused = not self.paused
        if ch == ord('c'):
            self.trackers = []
Example #24
class Capture:
    def __init__(self, video_src, paused=False):
        self.cap = video.create_capture(video_src)
        _, self.frame = self.cap.read()
        cv2.imshow('frame', self.frame)
        self.rect_sel = RectSelector('frame', self.onrect)
        self.trackers = []
        self.paused = paused

    def onrect(self, rect):
        frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        tracker = Tracker(frame_gray, rect)
        self.trackers.append(tracker)

    def run(self):
        if not self.paused:
            ret, self.frame = self.cap.read()
            if not ret:
                return
            frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
            for tracker in self.trackers:
                tracker.update(frame_gray)

        vis = self.frame.copy()
        for tracker in self.trackers:
            tracker.draw_state(vis)
        if len(self.trackers) > 0:
            cv2.imshow('tracker state', self.trackers[-1].state_vis)
        self.rect_sel.draw(vis)

        cv2.imshow('frame', vis)
        ch = cv2.waitKey(10) & 0xFF
        if ch == 27:
            return
        if ch == ord(' '):
            self.paused = not self.paused
        if ch == ord('c'):
            self.trackers = []
Example #25
class App:
    def __init__(self, video_src, robotq, appq, launchspeed, swatted):
        self.cap = video.create_capture(video_src)
        _, self.frame = self.cap.read()
        cv2.namedWindow('frame')
        self.row = 0
        self.bounceshot = 0
        cv2.createTrackbar('row', 'frame', 0, 2, self.onrow)
        cv2.createTrackbar('speed', 'frame', 0, 512, self.onspeed)
        cv2.createTrackbar('bounceshot', 'frame', 0, 1, self.onbounceshot)
        cv2.imshow('frame', self.frame)
        self.rect_sel = RectSelector('frame', self.onrect)
        self.trackers = []
        self.robotq = robotq
        self.appq = appq
        self.launchspeed = launchspeed
        self.swatted = swatted

    def nothing(*arg):
        pass

    def onrow(self, row):
        '''When the row is changed, update the speed.'''
        self.row = row
        if self.bounceshot:
            if row == 0: speed = 160
            elif row == 1: speed = 165
            elif row == 2: speed = 170
        else:
            if row == 0: speed = 230
            elif row == 1: speed = 235
            elif row == 2: speed = 240
        cv2.setTrackbarPos('speed', 'frame', speed)

    def onspeed(self, speed):
        '''When the speed is changed, send it to the robot.'''
        self.robotq.put((0, speed))

    def onbounceshot(self, bounceshot):
        '''When we toggle bounce shots, update the speed.'''
        self.bounceshot = bounceshot
        self.onrow(self.row)

    def onrect(self, rect):
        frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        tracker = MOSSE(frame_gray, rect)
        self.trackers = [tracker]

    def drawcrosshairs(self,
                       img,
                       width,
                       height,
                       color=(0, 255, 255),
                       thickness=1):
        p0 = int(width // 2), 0
        p1 = int(width // 2), int(height)
        cv2.line(img, p0, p1, color, thickness)
        p0 = int(width // 2) - int(width // 10), int(height // 2)
        p1 = int(width // 2) + int(width // 10), int(height // 2)
        cv2.line(img, p0, p1, color, thickness)

    def run(self):
        direction = 0
        while True:
            ret, self.frame = self.cap.read()
            self.frame = cv2.flip(self.frame, -1)
            if not ret:
                break
            frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
            for tracker in self.trackers:
                tracker.update(frame_gray)

            vis = self.frame.copy()
            width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
            height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
            if len(self.trackers) > 0:
                x, _ = self.trackers[0].draw_state(vis)
                x = int(x)

                # Make the robot move toward the object
                if x < width // 2:
                    if direction >= 0:
                        print "Going left"
                        self.robotq.put((1, 100000, 1))
                        direction = -1
                elif x > width // 2:
                    if direction <= 0:
                        print "Going right"
                        self.robotq.put((1, 100000, 0))
                        direction = 1
                else:
                    print "Cup targeting complete"
                    self.robotq.put((1, 0, 0))
                    direction = 0
            elif direction != 0:
                self.robotq.put((1, 0, 0))
                direction = 0

            self.drawcrosshairs(vis, width, height)
            self.rect_sel.draw(vis)

            draw_str(vis, (5, 15),
                     "Launch speed: {}".format(self.launchspeed.value))
            draw_str(vis, (5, 30), "Swatted: {}".format(self.swatted.value))

            cv2.imshow('frame', vis)
            ch = cv2.waitKey(10)
            if ch == 27:
                break
            if ch == ord('d'):
                print("Manually going right")
                self.robotq.put((1, 50, 0))
            if ch == ord('a'):
                print("Manually going left")
                self.robotq.put((1, 50, 1))
            if ch == ord(' '):
                print("Shooting")
                self.robotq.put('shoot')
            if ch == ord('s'):
                print("Swatting")
                self.robotq.put('swat')
            if ch == ord('c'):
                self.trackers = []
        cv2.destroyAllWindows()
        self.robotq.put('exit')
Example #26
class MotionTracker:
    def __init__(self, video_src, paused=False):
        self.cap = video.create_capture(video_src)
        _, self.frame = self.cap.read()
        # self.out is the output video writer
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        self.out = cv2.VideoWriter('output.avi', fourcc, 30,
                                   (self.frame.shape[1], self.frame.shape[0]))
        cv2.imshow('frame', self.frame)
        # For manually selecting objects to track. Not using, but not removing either.
        self.rect_sel = RectSelector('frame', self.onrect)
        self.trackers = []
        self.paused = paused

    def onrect(self, rect):
        frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        tracker = MOSSE(frame_gray, rect, len(self.trackers))
        self.trackers.append(tracker)

    def run(self):
        frame_number = 0
        csvf = open('tracks.csv', 'w')
        while True:
            if not self.paused:
                ret, self.frame = self.cap.read()
                if not ret:
                    break
                # First update existing trackers with current frame
                frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
                # Remove MOSSE trackers that have been stationary for 100 frames
                # (rebuild the list instead of removing items while iterating over it)
                self.trackers = [t for t in self.trackers
                                 if t.stationary_for_frames <= 100]
                # Calculate the union of all existing trackers
                # The type of object returned depends on the relationship between the operands.
                # The union of polygons (for example) will be a polygon or a multi-polygon depending on whether they intersect or not.
                union_of_trackers = None
                index = 0
                for tracker in self.trackers:
                    # fix tracker index
                    tracker.index = index
                    index = index + 1
                    tracker.update(frame_gray)
                    (x, y), (w, h) = tracker.pos, tracker.size
                    # tracker returns center and size; convert it to topleft and bottomright points
                    x1, y1, x2, y2 = int(x - 0.5 * w), int(y - 0.5 * h), int(
                        x + 0.5 * w), int(y + 0.5 * h)
                    b = gs.box(x1, y1, x2, y2)
                    if not union_of_trackers:
                        union_of_trackers = b
                    else:
                        union_of_trackers = union_of_trackers.union(b)

                # Call MOG and get a list of large enough foreground rects
                rects = mog.getForegroundRects(self.frame)
                for rect in rects:
                    x, y, w, h = rect
                    r = gs.box(
                        x, y, x + w, y + h
                    )  # MOG returns topleft and size; need topleft and bottomright points
                    # check if this rect(MOG) is already mostly covered by other trackers(MOSSE)
                    if union_of_trackers:
                        common_area = union_of_trackers.intersection(r).area
                        ratio_covered = common_area / r.area
                        if ratio_covered > .6:
                            continue

                    # check if this rect(MOG) almost contains another tracker(MOSSE), if yes then update that tracker's rect
                    new_rect = True
                    for tracker in self.trackers:
                        (x, y), (w, h) = tracker.pos, tracker.size
                        x1, y1, x2, y2 = int(x - 0.5 * w), int(
                            y - 0.5 * h), int(x + 0.5 * w), int(y + 0.5 * h)
                        b = gs.box(x1, y1, x2, y2)
                        #if(r.area  > b.area):
                        common_area = r.intersection(b).area
                        ratio_covered = common_area / b.area
                        #print(ratio_covered)
                        if ratio_covered > .2:
                            if r.area / b.area > 1.2 or b.area / r.area > 1.2:
                                x1, y1, x2, y2 = r.union(b).bounds
                                self.trackers[tracker.index] = MOSSE(
                                    frame_gray,
                                    (int(x1), int(y1), int(x2), int(y2)),
                                    tracker.index,
                                    tracker.id_,
                                    reInit=True)
                            else:
                                tracker.stationary_for_frames = 0
                            new_rect = False
                            break

                    # otherwise new tracker found; append to the list of trackers
                    if new_rect:
                        x, y, w, h = rect
                        tracker = MOSSE(frame_gray, (x, y, x + w, y + h),
                                        len(self.trackers))
                        self.trackers.append(tracker)

                # write csv output file
                for tracker in self.trackers:
                    (x, y), (w, h) = tracker.pos, tracker.size
                    x1, y1, x2, y2 = int(x - 0.5 * w), int(y - 0.5 * h), int(
                        x + 0.5 * w), int(y + 0.5 * h)
                    if x1 < 0:
                        x1 = 0
                    if y1 < 0:
                        y1 = 0
                    if x2 > self.frame.shape[1] - 1:
                        x2 = self.frame.shape[1] - 1
                    if y2 > self.frame.shape[0] - 1:
                        y2 = self.frame.shape[0] - 1
                    writer = csv.writer(csvf)
                    writer.writerow(
                        (tracker.index, frame_number, x1, y1, x2, y2))

                # print("self.trackers", self.trackers)
                # print("frame number", frame_number)
                frame_number += 1

            vis = self.frame.copy()
            for tracker in self.trackers:
                tracker.draw_state(vis)
            #if len(self.trackers) > 0:
            #    cv2.imshow('tracker state', self.trackers[-1].state_vis)
            self.rect_sel.draw(vis)

            cv2.imshow('frame', vis)
            self.out.write(vis)
            ch = cv2.waitKey(10) % 256
            if ch == 27:
                break
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == ord('c'):
                self.trackers = []
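The MOG/MOSSE fusion above relies on a geometry helper gs for box unions and intersections. A minimal sketch of the coverage test it applies with the 0.6 and 0.2 thresholds, assuming gs is shapely.geometry (the import is not shown in the snippet):

import shapely.geometry as gs

def coverage_ratio(rect_a, rect_b):
    # Fraction of rect_b's area covered by rect_a; both rects are
    # (x1, y1, x2, y2) corner boxes as built with gs.box() above.
    a = gs.box(*rect_a)
    b = gs.box(*rect_b)
    if b.area == 0:
        return 0.0
    return a.intersection(b).area / b.area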
Example #27
class App:

    def __init__(self, cap, paused=False):
        self.cap = cap
        _, self.frame = self.cap.read()
        self.croph, self.cropw = 1080, 1920
        self.frameh, self.framew, _ = self.frame.shape
        self.original = self.frame
        # use integer division so cv2.resize receives integer dimensions
        self.frame = imutils.resize(self.frame,
                                    width=self.framew // 2,
                                    height=self.framew // 2)

        cv2.imshow('frame', self.frame)

        self.rect_sel = RectSelector('frame', self.onrect)
        self.trackers = []
        self.paused = paused

    def onrect(self, rect):
        frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)

        tracker = MOSSE(frame_gray, rect)
        self.trackers.append(tracker)

    def run(self):
        # Define the codec and create VideoWriter object
        fourcc = cv2.VideoWriter_fourcc(*'DIVX')
        #the spec is for my webcam, the IP camera is 1920x1080
        out = cv2.VideoWriter('output.avi', fourcc, 30.0, (1920, 1080))

        while True:
            if not self.paused:
                ret, self.frame = self.cap.read()
                self.original = self.frame
                if self.frame is None:
                    out.release()
                    cv2.destroyAllWindows()
                    break

                self.frame = imutils.resize(self.frame,
                                            width=self.framew // 2,
                                            height=self.framew // 2)

                frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
                for tracker in self.trackers:
                    x, y = tracker.pos[0] * 2, tracker.pos[1] * 2

                    # remember you are cropping from the 3840x2160 original
                    if self.framew - (x + self.cropw / 2) < 0:
                        x = self.framew - self.cropw / 2
                    if self.frameh - (y + self.croph / 2) < 0:
                        y = self.frameh - self.croph / 2
                    if x - 960 < 0:
                        x = 960
                    if y - 540 < 0:
                        y = 540
                    subwin = cv2.getRectSubPix(self.original,
                                               (self.cropw, self.croph),
                                               (x, y))

                    out.write(subwin)
                    tracker.update(frame_gray)

            vis = self.frame.copy()
            for tracker in self.trackers:
                tracker.draw_state(vis)
            if len(self.trackers) > 0:
                cv2.imshow('tracker state', self.trackers[-1].state_vis)
            self.rect_sel.draw(vis)

            cv2.imshow('frame', vis)

            ch = cv2.waitKey(10) & 0xFF
            if ch == 27:
                out.release()
                cv2.destroyAllWindows()
                break
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == ord('c'):
                self.trackers = []
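Example #27 crops a fixed 1920x1080 sub-window around the tracked position out of the full-resolution original before writing it to video; the four if-statements above clamp the crop center so the window stays inside the frame. The same clamping, written as a small general helper (an illustration; the example hard-codes 960 and 540 as half the crop size):

def clamp_center(x, y, frame_w, frame_h, crop_w, crop_h):
    # Keep a crop_w x crop_h window centered at (x, y) fully inside the frame.
    x = min(max(x, crop_w / 2), frame_w - crop_w / 2)
    y = min(max(y, crop_h / 2), frame_h - crop_h / 2)
    return x, y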
Example #28
        piThread = PiThread(getPiData)
        piThread.start()

        print('connecting...')
        d = ardrone.ARDrone()
        print('waiting for video...')
        while d.image is None:
            pass

        print('loading calibration info')
        mtx, dist, newcameramtx = getCalibrationInfo()

        out = None

        # cv2.imshow('frame', np.array([FRAME_HEIGHT, FRAME_WIDTH, 3]))    #######Changed
        rs = RectSelector('frame', startTracking)

        print('battery remaining:', d.navdata['demo']['battery'])
        updateBattery(d.navdata['demo']['battery'])
        if d.navdata['state']['emergency'] == 1:
            d.reset()

        while True:
            frame = cv2.cvtColor(np.array(d.image), cv2.COLOR_RGB2BGR)
            # frame = cv2.undistort(frame, mtx, dist)
            frame = cv2.undistort(frame, mtx, dist, None, newcameramtx)
            if rs.dragging:
                rs.draw(frame)
            if isTracking:
                tracker.update(frame)
                r = tracker.get_position()
Example #29
 def Open(self):
     self.paused = False
     cv.namedWindow(titleWindow)
     self.rect_sel = RectSelector(titleWindow, self.onrect)
     self.trackers = []
     PyStreamRun(self.OpenCVCode, titleWindow)
Example #30
class ObjectTracker:
    def __init__(self,
                 outputDest,
                 trackAlgorithm='mosse',
                 drawKp=False,
                 doClassify=False,
                 doActRecog=True,
                 outFps=30,
                 retrain=False,
                 matchEdges=False,
                 filterVelocity=True):
        self.tracks = []
        self.trackCount = 0
        self.doClassify = doClassify
        self.doActRecog = doActRecog
        self.drawKeypoints = drawKp
        self.trackAlgorithm = trackAlgorithm
        self.vCubeDimens = (75, 100, 45)
        self.outFps = outFps
        self.doTrackWrite = False
        self.rect_sel = RectSelector('frame', self.onrect)
        self.lastFrame = None
        if self.doClassify:
            self.classifier = ObjectClassifier()
        if self.doActRecog:
            self.actionRecognizer = ActionRecognize(
                'actions/',
                self.vCubeDimens,
                matchEdges=matchEdges,
                retrain=retrain,
                filterVelocity=filterVelocity)
        self.outputFile = open(outputDest, 'w')
        self.frameCount = 0

    def seedTracks(self, img, mask, points):
        self.lastFrame = img
        contourlist = points[1]
        for contour in contourlist:

            rect = cv2.boundingRect(contour)
            x, y, w, h = rect

            # TBD: remove this once we can fix whole first frame being detected as track
            if w > 10000 or h < 100:  #cv2.contourArea(contour) <= 5000:
                continue

            # is this blob close to a known object?
            foundTrack = False

            for track in self.tracks:
                if track.isActive() and track.matches(rect, img):
                    foundTrack = True
                    continue

            # start new track
            if not foundTrack and not self.isNearEdge(rect, img):
                self.startTrack(img, rect, mask)

    def onrect(self, rect):
        # API delivers top left/bottom right corners so we need to convert
        x1, y1, x2, y2 = rect
        trackRect = (x1, y1, x2 - x1, y2 - y1)
        self.startTrack(self.lastFrame, trackRect)

    def startTrack(self, img, rect, mask=None):
        self.trackCount = self.trackCount + 1
        ntrck = getTrackInst(self.trackCount, self.trackAlgorithm, rect, img,
                             mask)
        ntrck.setOutFps(self.outFps)
        self.tracks.append(ntrck)
        print('New track #' + repr(self.trackCount) + ' ' + repr(rect))

    def writeTracks(self, frame):
        for track in self.tracks:
            x, y, w, h = track.getNewestRect()
            lost = '1' if not track.isActive() else '0'
            occluded = '1' if track.isOccluded() else '0'
            generated = '1' if track.isOccluded() else '0'
            self.outputFile.write('' + repr(track.getTrackId() - 1) + ' ' +
                                  repr(x) + ' ' + repr(y) + ' ' + repr(x + w) +
                                  ' ' + repr(y + h) + ' ' + repr(frame) + ' ' +
                                  lost + ' ' + occluded + ' ' + generated +
                                  ' "' + track.getLastAction() + '"')
            self.outputFile.write("\n")

    def saveTracks(self):
        for track in self.tracks:
            track.saveLastTracklet()

    def clearTracks(self):
        self.tracks = []
        #self.trackCount = 0

    def update(self, img):
        self.frameCount = self.frameCount + 1
        for track in self.tracks:
            if track.isActive():
                track.update(img)
                if self.doActRecog and self.frameCount % 5 == 0:
                    action = self.actionRecognizer.getAction(
                        track.getLastTracklet(45))
                    track.updateAction(action)

                if self.doClassify:
                    ttype, kp = self.classifier.classify(
                        img, track.getNewestRect())
                    track.updateClass(ttype, kp)

    def toggleDoTrackWrite(self):
        self.doTrackWrite = not self.doTrackWrite
        #for track in self.tracks:
        #    track.setDoTrackWrite(self.doTrackWrite)

    def isNearEdge(self, rect, img, thresh=50):
        x, y, w, h = rect
        ih, iw = img.shape[:2]
        return x < thresh or y < thresh or x + w > iw - thresh or y + h > ih - thresh

    def drawBounds(self,
                   title,
                   img,
                   color=(0, 255, 0),
                   occludeColor=(0, 0, 255)):
        self.rect_sel.draw(img)
        useColor = color
        height, width = img.shape[:2]

        for track in self.tracks:
            if not track.isActive():
                continue
            if track.isOccluded():
                useColor = occludeColor
            else:
                useColor = color

            x, y, w, h = track.getNewestRect()
            xt, yt, wt, ht = track.getTrackletRect()

            if self.doActRecog:
                action = track.getLastAction()
            else:
                action = 'noActRec'

            lbl = '#' + repr(track.getTrackId()) + ' ' + action + '(' + repr(
                w) + ',' + repr(h) + ')'

            #if self.doClassify:
            #    ttype = track.getLastClass()
            #else:
            #    ttype = 'noclassify'

            #keypoints = track.getLastKeypoints()

            #if self.drawKeypoints and not keypoints is None:

            #    for kp in keypoints:
            #        kp.pt = (kp.pt[0]+x, kp.pt[1]+y)

            #    cv2.drawKeypoints(img,keypoints,img)

            #    lbl = '#' + repr(track.getTrackId()) + ' ' + ttype + ' (' + repr(len(keypoints)) + ')'
            #else:
            #    lbl = '#' + repr(track.getTrackId()) + ' ' + ttype

            cv2.rectangle(img, (x, y), (x + w, y + h), useColor, 1)

            if not self.doActRecog:
                cv2.rectangle(img, (xt, yt), (xt + wt, yt + ht), (0, 255, 255),
                              1)

            if track.isOccluded() and False:
                sx, sy, sw, sh = track.getSearchWindow()
                cv2.rectangle(img, (sx, sy), (sx + sw, sy + sh), (0, 255, 255),
                              1)

            cv2.putText(img, lbl, (x, y + h + 15), 0, 0.5, useColor)

            if False:
                path = track.getPath()
                predicted = track.getPredicted()
                currPoint = 0
                drawPoint = 1  #limit number of points drawn

                for point in path:
                    currPoint = currPoint + 1
                    if currPoint % drawPoint == 0:
                        cv2.putText(img, '+', (int(point[0]), int(point[1])),
                                    0, 0.3, color)

                for point in predicted:
                    cv2.putText(img, '+', (int(point[0]), int(point[1])), 0,
                                0.3, occludeColor)

        if self.doTrackWrite:
            frmLbl = 'Total objects: ' + repr(len(self.tracks)) + ' WRITE'
        else:
            frmLbl = 'Total objects: ' + repr(len(self.tracks))

        cv2.putText(img, title, (15, 15), 0, 0.5, color)
        cv2.putText(img, frmLbl, (15, 30), 0, 0.5, color)
        return img

    def closeOut(self):
        self.outputFile.close()
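A hypothetical driver loop for the ObjectTracker above (the video source, window handling, and the choice to disable classification and action recognition are assumptions for the sketch; seeding tracks via seedTracks()/onrect() is omitted):

import cv2

def run_object_tracker(video_path, out_path='tracks.txt'):
    cap = cv2.VideoCapture(video_path)
    cv2.namedWindow('frame')  # RectSelector in __init__ attaches to this window
    ot = ObjectTracker(out_path, doClassify=False, doActRecog=False)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        ot.update(frame)                       # advance all active tracks
        vis = ot.drawBounds('tracks', frame)   # overlay boxes and labels
        cv2.imshow('frame', vis)
        if cv2.waitKey(10) & 0xFF == 27:       # Esc quits
            break
    ot.closeOut()
    cap.release()
    cv2.destroyAllWindows()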
Example #31
def get_manul(sample_path, save_path):

    sample_rects = []
    file_idx = 0
    out_flag = False

    def Onrect(rect):
        sample_rects.append(rect)

    def filter_extr(file_name):
        return os.path.splitext(file_name)[-1] in ['.JPG', '.jpg', '.bmp']

    cv2.namedWindow('Frame', 1)
    rect_sel = RectSelector('Frame', Onrect)

    sample_files = os.listdir(sample_path)
    sample_files = filter(filter_extr, sample_files)

    for file in sample_files:

        sample_rects = []
        frame = cv2.imread(os.path.join(sample_path, file))
        frame = frame[:, :, ::-1]
        file_name = os.path.splitext(file)[0]

        while True:
            vis = frame.copy()
            rect_sel.draw(vis)
            draw_str(vis, (20, 20), file_name)
            for rect in sample_rects:
                x = rect[0]
                y = rect[1]
                w = rect[2] - rect[0]
                h = rect[3] - rect[1]
                draw_str(vis, (rect[0], rect[1] - 5),
                         '(%d,%d,%d,%d)' % (x, y, w, h))
                draw_rect(vis, rect)
            cv2.imshow('Frame', vis)
            ch = cv2.waitKey(1)
            if ch == 27:
                cv2.destroyAllWindows()
                out_flag = True
                break
            if ch == ord('n'):
                sample_rects = []
                break
            if ch == ord('s'):
                num_rects = len(sample_rects)
                print(num_rects)
                coor_file_name = file_name + '.txt'
                coor_file_path = os.path.join(sample_path, coor_file_name)
                fp = open(coor_file_path, 'w')
                for idx_rect, rect in enumerate(sample_rects):
                    x0, y0, x1, y1 = rect
                    x_c = (x0 + x1) * 1. / 2
                    y_c = (y0 + y1) * 1. / 2
                    w = (x1 - x0) * 1.
                    h = (y1 - y0) * 1.
                    coor_res = '%f %f %f %f' % (x_c / vis.shape[1], y_c /
                                                vis.shape[0], w / vis.shape[1],
                                                h / vis.shape[0])
                    fp.write("0" + " " + coor_res + "\n")
                fp.close()
            if ch == ord('r'):
                sample_rects = []
        if out_flag:
            break
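get_manul writes one .txt file per image containing normalized center/size boxes, which matches the YOLO label convention "class x_center y_center width height" in [0, 1]. A small reader that converts such a line back to pixel corner coordinates (an illustration added here; the class id is ignored):

def load_label_rects(txt_path, img_w, img_h):
    rects = []
    with open(txt_path) as fp:
        for line in fp:
            _cls, xc, yc, w, h = map(float, line.split())
            x0 = int((xc - w / 2.0) * img_w)
            y0 = int((yc - h / 2.0) * img_h)
            x1 = int((xc + w / 2.0) * img_w)
            y1 = int((yc + h / 2.0) * img_h)
            rects.append((x0, y0, x1, y1))
    return rects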
Example #32
class App1:
    def __init__(self, video_src, paused=False):
        self.cap = video.create_capture(1)
        _, self.frame = self.cap.read()
        cv2.imshow('frame', self.frame)
        self.rect_sel = RectSelector('frame', self.onrect)
        self.trackers = []
        self.paused = paused

    def onrect(self, rect):
        frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        tracker = MOSSE(frame_gray, rect)
        self.trackers.append(tracker)

    def run(self, one):
        global required_area
        global max_area
        coun_max = 0
        while True:

            if not self.paused:
                ret, self.frame = self.cap.read()
                if not ret:
                    break
                frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
                blurgray = cv2.GaussianBlur(frame_gray, (5, 5), 0)
                ret, thresh1 = cv2.threshold(
                    blurgray, 70, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
                #cv2.imshow('thresh',thresh1)
                image, contours, hierarchy = cv2.findContours(
                    thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                drawing = np.zeros(self.frame.shape, np.uint8)
                max_area = 0
                ci = 0
                for i in range(len(contours)):
                    cnt = contours[i]
                    area = cv2.contourArea(cnt)
                    #setting required_area
                    if (area > max_area):
                        max_area = area
                        if (max_area > required_area):
                            required_area = max_area
                        ci = i
                cnt = contours[ci]
                hull = cv2.convexHull(cnt)
                moments = cv2.moments(cnt)
                if moments['m00'] != 0:
                    cx = int(moments['m10'] / moments['m00'])  # cx = M10/M00
                    cy = int(moments['m01'] / moments['m00'])  # cy = M01/M00

                centr = (cx, cy)
                cv2.circle(self.frame, centr, 5, [0, 0, 255], 2)
                cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 2)
                cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 2)

                cnt = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True),
                                       True)
                hull = cv2.convexHull(cnt, returnPoints=False)

                if (1):
                    defects = cv2.convexityDefects(cnt, hull)
                    mind = 0
                    maxd = 0
                    coun = 0

                    for i in range(defects.shape[0]):
                        coun = coun + 1
                        s, e, f, d = defects[i, 0]
                        start = tuple(cnt[s][0])
                        end = tuple(cnt[e][0])
                        far = tuple(cnt[f][0])
                        dist = cv2.pointPolygonTest(cnt, centr, True)
                        cv2.line(self.frame, start, end, [0, 255, 0], 2)
                        cv2.circle(self.frame, far, 5, [0, 0, 255], -1)
                        cv2.circle(self.frame, start, 5, [255, 0, 0], -1)

                    i = 0
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(self.frame, str(coun), (0, 40), font, 1,
                                (0, 0, 0), 2)
                    cv2.putText(self.frame, str(coun_max), (0, 80), font, 1,
                                (0, 0, 0), 2)
                    (x, y) = win32api.GetCursorPos()
                    if (1):
                        if (coun == coun_max + 1):
                            while (one == 0):
                                (x, y) = win32api.GetCursorPos()
                                win32api.mouse_event(
                                    win32con.MOUSEEVENTF_WHEEL, x, y, 60, 0)
                                end = 1
                                one = 1
                        if (coun == coun_max - 1):
                            while (one == 0):
                                (x, y) = win32api.GetCursorPos()
                                win32api.mouse_event(
                                    win32con.MOUSEEVENTF_WHEEL, x, y, -60, 0)
                                end = 1
                                one = 1

                        if (coun == coun_max):
                            (x, y) = win32api.GetCursorPos()
                            #win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,int(screenWidth -4*x),int(2*y),0,0)
                            end = 0
                            one = 0

                    #else:
                    #pyautogui.hotkey('win','m')
                for tracker in self.trackers:
                    tracker.update(frame_gray)

            vis = self.frame.copy()
            for tracker in self.trackers:
                tracker.draw_state(vis, 0)
            if len(self.trackers) > 0:
                cv2.imshow('tracker state', self.trackers[-1].state_vis)
            self.rect_sel.draw(vis)

            cv2.imshow('frame', vis)
            ch = cv2.waitKey(10)
            if ch == ord('s'):
                coun_max = coun
                a = cx - 80
                b = cy - 80
                c = cx + 80
                d = cy + 80
                frame_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
                tracker = MOSSE(frame_gray, (a, b, c, d))
                self.trackers.append(tracker)
            if ch == 27:
                self.cap.release()
                break
            if ch == ord(' '):
                self.paused = not self.paused
            if ch == ord('c'):
                self.trackers = []
                coun_max = 0
            if ch == ord('g'):
                print "__chirayu__"
                App1(video_src, paused='--pause' in opts).run(one)
                print "__harshit__"