Exemplo n.º 1
0
def main(videoPath="",
         verbose=False,
         videoWidth=1280,
         videoHeight=720,
         fontScale=1.0,
         inference=False,
         confidenceLevel=0.8):
    """Entry point: connect a video source to Azure IoT Hub and start capture.

    Opens the capture device as a context manager, registers it with a
    HubManager over MQTT, then runs the capture loop until Ctrl-C.
    """
    global hubManager

    try:
        logging.info('Python {}'.format(sys.version))
        logging.info(
            'Yolo Capture Azure IoT Edge Module. Press Ctrl-C to exit.')

        with VideoCapture(videoPath, verbose, videoWidth, videoHeight,
                          fontScale, inference,
                          confidenceLevel) as capture:

            try:
                # Show the "initialising IoT Hub" splash while connecting.
                capture.set_Wallpaper(capture._frame_wp_init_iothub)
                hubManager = HubManager(messageTimeout=10000,
                                        protocol=IoTHubTransportProvider.MQTT,
                                        verbose=False,
                                        videoCapture=capture)
                AppState.init(hubManager)
            except IoTHubError as err:
                # Hub connection failed; nothing to capture into, so bail out.
                logging.error("Unexpected error %s from IoTHub" % err)
                return

            capture.start()

    except KeyboardInterrupt:
        logging.info('Video capture module stopped')
Exemplo n.º 2
0
 def procVideo(self, camera):
     """Register *camera* and spawn a child process capturing its frames.

     The camera gets a bounded waiting queue; a separate process runs the
     ffmpeg capture loop and pushes frames into that queue.
     """
     self.__cameras.append(camera)
     cam_index = self.__cameras.index(camera)
     cap = VideoCapture(camera, cam_index)
     frame_queue = Queue(maxsize=Controller.__WAITING_QUEUE_MAXSIZE)
     self.waitingQueueDict[camera] = frame_queue
     proc = multiprocessing.Process(target=cap.captureFrameByFfmpeg,
                                    args=(frame_queue,))
     self.__processes.append(proc)
     proc.start()
Exemplo n.º 3
0
    def AddCamera(self, index, enum):
        """Add a new camera in the Python dictionary.

        Args:
            index: Device index used both as dictionary key (stringified)
                and as the VideoCapture source.
            enum: One of the CAMERA_VIDEOCAPTURE_* constants selecting the
                resolution and, optionally, the frame rate.

        Returns:
            True if the camera was added and calibrated, False if a camera
            with the same index is already registered.
        """
        key = str(index)
        # dict.has_key() was removed in Python 3; the `in` operator is the
        # portable membership test.
        if key in self.__devices:
            return False

        self.__devices[key] = VideoCapture(index)
        # Compare by equality, not identity: `is` only works here by accident
        # when the constants happen to be interned/cached objects.
        if enum == CAMERA_VIDEOCAPTURE_320X240:
            self.__devices[key].Size = (320, 240)
        elif enum == CAMERA_VIDEOCAPTURE_320X240_15FPS:
            self.__devices[key].Size = (320, 240)
            self.__devices[key].FPS = 15
        elif enum == CAMERA_VIDEOCAPTURE_320X240_30FPS:
            self.__devices[key].Size = (320, 240)
            self.__devices[key].FPS = 30
        elif enum == CAMERA_VIDEOCAPTURE_640X480:
            self.__devices[key].Size = (640, 480)
        elif enum == CAMERA_VIDEOCAPTURE_640X480_15FPS:
            self.__devices[key].Size = (640, 480)
            self.__devices[key].FPS = 15
        else:
            # Default: 640x480 at 30 FPS.
            self.__devices[key].Size = (640, 480)
            self.__devices[key].FPS = 30

        self.__Calibration(key)

        return True
Exemplo n.º 4
0
    def __init__(self, window, video_source=0):
        """Build the translator UI inside *window* and start the refresh loop.

        Creates the video canvas sized to the capture frames, a snapshot
        button, sizes the window to fill the screen, then schedules the
        periodic update() and blocks in the Tk main loop.

        :param window: Tkinter root window to populate.
        :param video_source: camera index or video path (0 = default webcam).
        """
        self.window = window
        self.customFont = tkFont.Font(family="Product Sans", size=14)
        self.window.title("ASL Translator - Sufiyaan Nadeem")
        self.window.iconbitmap("Images\\asl_logo_2.ico")  #only .ico works
        self.window.configure(background="#ffffff")
        self.video_source = video_source

        self.screenHeight = self.window.winfo_screenheight()
        self.screenWidth = self.window.winfo_screenwidth()

        # open video source (by default this will try to open the computer webcam)
        self.vid = VideoCapture(self.video_source)
        #self.windowX=self.screenWidth/2-self.vid.width/2
        #self.windowY=self.screenHeight/2-self.vid.height/2

        # Type of filter that can be applied later on
        self.vidProcessing = BackgroundSubtract()

        # Create a canvas that can fit the above video source size
        self.canvas = tkinter.Canvas(window,
                                     width=self.vid.width,
                                     height=self.vid.height)
        self.canvas.pack()

        # Button that lets the user take a snapshot
        self.btn_snapshot = tkinter.Button(window,
                                           text="Snapshot",
                                           width=50,
                                           command=self.snapshot,
                                           font=self.customFont)
        self.btn_snapshot.pack(anchor=tkinter.CENTER, expand=True)
        """Documentation Button
        self.loadimage = tkinter.PhotoImage(file="Images\documentation.png")
        self.roundedbutton =tkinter.Button(window, image=self.loadimage,command=self.snapshot)
        self.roundedbutton["bg"] = "white"
        self.roundedbutton["border"] = "0"
        self.roundedbutton.pack(side="top")"""
        #self.window.geometry("%dx%d+%d+%d"%(self.screenWidth,self.screenHeight,self.windowX,self.windowY))
        # Fill the whole screen, anchored at the top-left corner.
        self.window.geometry("%dx%d+%d+%d" %
                             (self.screenWidth, self.screenHeight, 0, 0))

        # After it is called once, the update method will be automatically
        # called every `delay` milliseconds.
        self.delay = 15
        self.update()

        self.window.mainloop()
Exemplo n.º 5
0
    def start_live(self):
        """Create the live VideoCapture and wire all UI controls to it.

        Connects the pause/record buttons, the three per-view camera combo
        boxes and the six flip checkboxes, then starts streaming via
        change_live("G").
        """
        self.hasCapture = 1
        self.capture = VideoCapture(self)
        self.pauseButton.clicked.connect(self.pause)
        self.recordButton.clicked.connect(self.record)

        # Per-view camera selection.
        self.cam_view_1.activated[str].connect(self.interface_view_1)
        self.cam_view_2.activated[str].connect(self.interface_view_2)
        self.cam_view_3.activated[str].connect(self.interface_view_3)

        # Horizontal / vertical flip toggles for each camera.
        self.cam_1_fliph.stateChanged.connect(self.fliph1)
        self.cam_2_fliph.stateChanged.connect(self.fliph2)
        self.cam_3_fliph.stateChanged.connect(self.fliph3)
        self.cam_1_flipv.stateChanged.connect(self.flipv1)
        self.cam_2_flipv.stateChanged.connect(self.flipv2)
        self.cam_3_flipv.stateChanged.connect(self.flipv3)
        #self.capture.start("G",self.view_1,self.view_2,self.view_3)
        self.change_live("G")
Exemplo n.º 6
0
    def parameterOperation(self, argv):
        """Parse ``filepath adjust_name video_status`` and set up state.

        Only a well-formed three-token command (whose first token is not
        "exit") is processed; anything else is silently ignored.
        """
        tokens = argv.split(' ')

        # Guard clause: reject malformed commands and the "exit" token.
        if not tokens or tokens[0] == "exit" or len(tokens) != 3:
            return

        self.filePath = tokens[0]
        self.outputVideoDataPath = self.filePath + ".mp4"
        self.videoCapture = VideoCapture(self.filePath)

        # Where the adjustment settings are persisted.
        self.outputAdjustDataPath = "adjustData/" + tokens[1] + ".txt"
        print(self.outputAdjustDataPath)

        self.videoControl = VideoControl()

        status = tokens[2]
        if status == "Play":
            self.videoControl.set_videoStatus(VideoStatus['Playing'].value)
        elif status == "Pause":
            self.videoControl.set_videoStatus(VideoStatus['Paused'].value)
Exemplo n.º 7
0
    def __init__(self, parent, **kwargs):
        """Build the sorting UI and start the worker thread and update loop.

        Layout: three 'Spremnik' panes on top, and a terminal, SORT/START
        controls and a live-video canvas on the bottom.

        :param parent: parent Tk container the frames are packed into.
        :param kwargs: forwarded to the Frame initializer.
        """
        global th
        Frame.__init__(self, parent, **kwargs)
        # Message buffer: sections tagged 'A'/'B'/'C' with payload slots,
        # terminated by '#'.
        self.message_list = [
            'A', 0, 0, 0, 0, 0, 0, 0, 0, 0, 'B', 0, 0, 0, 0, 0, 0, 0, 0, 0,
            'C', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, '#'
        ]
        ''' making some frames '''
        self.top_frame = Frame(parent)
        self.bottom_frame = Frame(parent)
        self.sub_frame_A = Frame(self.top_frame)
        self.sub_frame_B = Frame(self.top_frame)
        self.sub_frame_C = Frame(self.top_frame)
        self.sub_frame_D = Frame(self.bottom_frame)
        self.sub_frame_E = Frame(self.bottom_frame)
        self.sub_frame_F = Frame(self.bottom_frame)

        self.predmet_num = 1
        self.flag = False
        # open video source
        self.Video = VideoCapture(0)

        # Bug fix: grid() returns None, so the original
        # `self.label_X = Label(...).grid(...)` stored None in the
        # attributes.  Create each label first, then place it.
        self.label_A = Label(self.top_frame,
                             text=DASH_COUNT * "-" + " Spremnik 1 " +
                             DASH_COUNT * "-",
                             bd=2,
                             relief="solid",
                             fg='white',
                             bg='grey')
        self.label_A.grid(row=0, column=0)
        self.label_B = Label(self.top_frame,
                             text=DASH_COUNT * '-' + " Spremnik 2 " +
                             DASH_COUNT * "-",
                             bd=2,
                             relief="solid",
                             fg='white',
                             bg='grey')
        self.label_B.grid(row=0, column=1)
        self.label_C = Label(self.top_frame,
                             text=DASH_COUNT * '-' + " Spremnik 3 " +
                             DASH_COUNT * "-",
                             bd=2,
                             relief="solid",
                             fg='white',
                             bg='grey')
        self.label_C.grid(row=0, column=2)
        self.spremnik_A = Spremnik(self, self.sub_frame_A)
        self.spremnik_B = Spremnik(self, self.sub_frame_B)
        self.spremnik_C = Spremnik(self, self.sub_frame_C)
        # Create a canvas that can fit the above video source size
        self.canvas = Canvas(self.sub_frame_F,
                             width=self.Video.width,
                             height=self.Video.height)
        self.terminal = Text(self.sub_frame_D, height=20, width=40)
        self.terminal_scrollbar = Scrollbar(self.sub_frame_D,
                                            command=self.terminal.yview)
        self.terminal.config(yscrollcommand=self.terminal_scrollbar.set)
        self.analyze_sort_btn = Button(self.sub_frame_E,
                                       height=3,
                                       text="SORT",
                                       command=self.Analyze_Sort,
                                       width=8)
        self.start_stop_btn = Button(self.sub_frame_E,
                                     height=3,
                                     text="START",
                                     bg="light green",
                                     command=self.Start_Stop,
                                     width=8)
        ''' gridding and packing '''
        self.sub_frame_A.grid(row=1, column=0)
        self.sub_frame_B.grid(row=1, column=1)
        self.sub_frame_C.grid(row=1, column=2)
        self.sub_frame_D.grid(row=0, column=0)
        self.sub_frame_E.grid(row=0, column=1, padx=60)
        self.sub_frame_F.grid(row=0, column=2)
        self.terminal.grid(row=0, column=0)
        self.terminal_scrollbar.grid(row=0, column=1, sticky='ns')
        self.analyze_sort_btn.grid(row=0, column=0, pady=35)
        self.start_stop_btn.grid(row=1, column=0)
        self.canvas.grid(row=0, column=0)
        self.top_frame.pack()
        Label(parent, text=270 * '-').pack()
        self.bottom_frame.pack()

        # Poll interval (ms) for update(); `th` is the module-level worker
        # thread started here.
        self.delay = 4
        self.received_masa = ""
        th.start()
        self.update()
Exemplo n.º 8
0
class VideoDisplayWidget(QWidget):
    """Qt widget showing up to three live camera views.

    On construction it probes consecutive OpenCV device indices, then
    builds the control layout: Live / Record / Pause buttons, per-view
    camera selectors and per-camera horizontal/vertical flip checkboxes.
    The actual streaming is delegated to a project ``VideoCapture`` object
    created by :meth:`start_live`.
    """

    def __init__(self, parent):
        """Detect attached cameras and build the widget layout."""
        super(VideoDisplayWidget, self).__init__(parent)
        self.hasCapture = 0   # 1 once start_live() has created a capture
        self.flag_paused = 0  # pause() toggle state
        self.flag_record = 0  # record() toggle state
        ## HD RES- HEIGHT 720 WIDTH 1280
        ## FULL HD RES- HEIGHT 1080 WIDTH 1920
        ## VGA - HEIGHT 480 WIDTH 640
        self.CAM_HEIGHT = 480
        self.CAM_WIDTH = 640
        # Flip flags per camera slot; index i belongs to CAM i.
        self.fliphlist = [0, 0, 0]
        self.flipvlist = [0, 0, 0]
        self.cam = []
        self.FPS = 60

        # Probe device indices 0, 1, 2, ... until one fails to open.
        # NOTE(review): every loop test constructs an extra cv2.VideoCapture
        # that is never released; the except branch is the effective exit on
        # some platforms — confirm this probing strategy is intended.
        self.i = 0
        try:
            while (cv2.VideoCapture(self.i).isOpened()):
                #self.cam[self.i-1].set(cv2.CV_CAP_PROP_SETTINGS, 0);
                self.cam.append(cv2.VideoCapture(self.i))
                self.cam[self.i].set(3, self.CAM_WIDTH)   # property 3: frame width
                self.cam[self.i].set(4, self.CAM_HEIGHT)  # property 4: frame height
                self.i = self.i + 1
                print("camera -" + str(self.i) + " detected")
                print("Camera -" + str(self.i) + "Properties:")
                print(self.cam[self.i - 1].get(3))
                print(self.cam[self.i - 1].get(4))
        except Exception as e:
            print(e)
            print("All cameras detected.")

        # Default view assignment: with more than one camera the three views
        # start on cameras 3/2/1, otherwise everything points at camera 0.
        if (self.i > 1):
            self.view_1 = 3
            self.view_2 = 2
            self.view_3 = 1
        else:
            self.view_1 = 0
            self.view_2 = 0
            self.view_3 = 0
        self.layout = QFormLayout(self)
        print("Pixmap created")
        self.label = QLabel()
        self.liveButton = QPushButton('Live', parent)
        self.liveButton.clicked.connect(self.start_live)
        self.liveButton.setFixedWidth(50)
        self.recordButton = QPushButton('Start Recording', parent)
        self.recordButton.setFixedWidth(100)
        self.pauseButton = QPushButton('Pause', parent)
        self.pauseButton.setFixedWidth(50)
        self.cam_view = QComboBox(self)

        # Row 1: main transport controls.
        hbox = QHBoxLayout()
        hbox.addWidget(self.liveButton)
        hbox.addWidget(self.recordButton)
        hbox.addWidget(self.pauseButton)
        hbox.addWidget(self.cam_view)
        self.layout.addRow(hbox)

        self.setLayout(self.layout)
        print("Layout Completed")
        self.cam_1_fliph = QCheckBox("FLIP CAM 1 Horz", self)
        self.cam_2_fliph = QCheckBox("FLIP CAM 2 Horz", self)
        self.cam_3_fliph = QCheckBox("FLIP CAM 3 Horz", self)

        self.cam_1_flipv = QCheckBox("FLIP CAM 1 Vert", self)
        self.cam_2_flipv = QCheckBox("FLIP CAM 2 Vert", self)
        self.cam_3_flipv = QCheckBox("FLIP CAM 3 Vert", self)

        # Row 2: horizontal flip checkboxes.
        hboxflip = QHBoxLayout()
        hboxflip.addWidget(self.cam_1_fliph)
        hboxflip.addWidget(self.cam_2_fliph)
        hboxflip.addWidget(self.cam_3_fliph)
        self.layout.addRow(hboxflip)

        # Row 3: vertical flip checkboxes.
        vboxflip = QHBoxLayout()
        vboxflip.addWidget(self.cam_1_flipv)
        vboxflip.addWidget(self.cam_2_flipv)
        vboxflip.addWidget(self.cam_3_flipv)
        self.layout.addRow(vboxflip)

        # Row 4: one camera selector per view, listing every detected camera.
        self.cam_view_1 = QComboBox(self)
        self.cam_view_2 = QComboBox(self)
        self.cam_view_3 = QComboBox(self)
        count = 0
        while (count < len(self.cam)):
            self.cam_view_1.addItem("CAM_" + str(count))
            self.cam_view_2.addItem("CAM_" + str(count))
            self.cam_view_3.addItem("CAM_" + str(count))
            count += 1

        box_cam = QHBoxLayout()
        box_cam.addWidget(self.cam_view_1)
        box_cam.addWidget(self.cam_view_2)
        box_cam.addWidget(self.cam_view_3)
        self.layout.addRow(box_cam)

        self.setLayout(self.layout)
        print("Layout Completed")

    def interface_view_1(self, text):
        """Point view 1 at the camera named by *text* and restart the stream."""
        if (text == "CAM_1"):
            self.view_1 = 1
        elif (text == "CAM_2"):
            self.view_1 = 2
            print("View Changed")
        elif (text == "CAM_3"):
            self.view_1 = 3
            print("View Changed")
        elif (text == "CAM_0"):
            self.view_1 = 0
            print("View Changed")
        self.capture.start("G", self.view_1, self.view_2, self.view_3)

    def interface_view_2(self, text):
        """Point view 2 at the camera named by *text* and restart the stream."""
        if (text == "CAM_1"):
            self.view_2 = 1
        elif (text == "CAM_2"):
            self.view_2 = 2
            print("View Changed")
        elif (text == "CAM_3"):
            self.view_2 = 3
            print("View Changed")
        elif (text == "CAM_0"):
            self.view_2 = 0
            print("View Changed")
        self.capture.start("G", self.view_1, self.view_2, self.view_3)

    def interface_view_3(self, text):
        """Point view 3 at the camera named by *text* and restart the stream."""
        if (text == "CAM_1"):
            self.view_3 = 1
        elif (text == "CAM_2"):
            self.view_3 = 2
            print("View Changed")
        elif (text == "CAM_3"):
            self.view_3 = 3
            print("View Changed")
        elif (text == "CAM_0"):
            self.view_3 = 0
            print("View Changed")
        self.capture.start("G", self.view_1, self.view_2, self.view_3)

    def change_live(self, text):
        """Restart the capture stream with the current view selection."""
        self.capture.start("G", self.view_1, self.view_2, self.view_3)

    def start_live(self):
        """Create the live VideoCapture and wire all UI controls to it."""
        self.hasCapture = 1
        self.capture = VideoCapture(self)
        self.pauseButton.clicked.connect(self.pause)
        self.recordButton.clicked.connect(self.record)

        # Per-view camera selection.
        self.cam_view_1.activated[str].connect(self.interface_view_1)
        self.cam_view_2.activated[str].connect(self.interface_view_2)
        self.cam_view_3.activated[str].connect(self.interface_view_3)

        # Horizontal / vertical flip toggles for each camera.
        self.cam_1_fliph.stateChanged.connect(self.fliph1)
        self.cam_2_fliph.stateChanged.connect(self.fliph2)
        self.cam_3_fliph.stateChanged.connect(self.fliph3)
        self.cam_1_flipv.stateChanged.connect(self.flipv1)
        self.cam_2_flipv.stateChanged.connect(self.flipv2)
        self.cam_3_flipv.stateChanged.connect(self.flipv3)
        #self.capture.start("G",self.view_1,self.view_2,self.view_3)
        self.change_live("G")

    def fliph1(self, state):
        """Toggle the horizontal flip flag for camera 1 (Qt state is ignored)."""
        if (self.fliphlist[0] == 0):
            self.fliphlist[0] = 1
        else:
            self.fliphlist[0] = 0

    def fliph2(self, state):
        """Toggle the horizontal flip flag for camera 2 (Qt state is ignored)."""
        if (self.fliphlist[1] == 0):
            self.fliphlist[1] = 1
        else:
            self.fliphlist[1] = 0

    def fliph3(self, state):
        """Toggle the horizontal flip flag for camera 3 (Qt state is ignored)."""
        if (self.fliphlist[2] == 0):
            self.fliphlist[2] = 1
        else:
            self.fliphlist[2] = 0

    def flipv1(self, state):
        """Toggle the vertical flip flag for camera 1 (Qt state is ignored)."""
        if (self.flipvlist[0] == 0):
            self.flipvlist[0] = 1
        else:
            self.flipvlist[0] = 0

    def flipv2(self, state):
        """Toggle the vertical flip flag for camera 2 (Qt state is ignored)."""
        if (self.flipvlist[1] == 0):
            self.flipvlist[1] = 1
        else:
            self.flipvlist[1] = 0

    def flipv3(self, state):
        """Toggle the vertical flip flag for camera 3 (Qt state is ignored)."""
        if (self.flipvlist[2] == 0):
            self.flipvlist[2] = 1
        else:
            self.flipvlist[2] = 0

    def record(self):
        """Start/stop recording and flip the button caption accordingly."""
        if self.flag_record == 0:
            self.recordButton.setText("Stop Recording")
            self.capture.start_record()
            self.flag_record = 1
        else:
            self.recordButton.setText("Start Recording")
            self.capture.stop_record()
            self.flag_record = 0

    def pause(self):
        """Toggle pause: freeze the capture, or restart it when unpausing."""
        if (self.flag_paused == 0):
            self.pauseButton.setText("Unpause")
            self.pauseButton.setFixedWidth(80)
            self.capture.pause()
            self.flag_paused = 1
        else:
            self.pauseButton.setText("Pause")
            self.pauseButton.setFixedWidth(50)
            self.capture.start("G")
            self.flag_paused = 0
Exemplo n.º 9
0
class Window:
    """Main application window for the ASL translator.

    Sets up the user interface (video canvas, snapshot button, fonts,
    titles) and runs the Tk main loop, refreshing the video frame every
    ``delay`` milliseconds.

    :param window: The Tkinter window.
    :param video_source: The video source index/path (0 = default webcam).
    """

    def __init__(self, window, video_source=0):
        self.window = window
        self.customFont = tkFont.Font(family="Product Sans", size=14)
        self.window.title("ASL Translator - Sufiyaan Nadeem")
        self.window.iconbitmap("Images\\asl_logo_2.ico")  #only .ico works
        self.window.configure(background="#ffffff")
        self.video_source = video_source

        self.screenHeight = self.window.winfo_screenheight()
        self.screenWidth = self.window.winfo_screenwidth()

        # open video source (by default this will try to open the computer webcam)
        self.vid = VideoCapture(self.video_source)

        # Type of filter that can be applied later on
        self.vidProcessing = BackgroundSubtract()

        # Create a canvas that can fit the above video source size
        self.canvas = tkinter.Canvas(window,
                                     width=self.vid.width,
                                     height=self.vid.height)
        self.canvas.pack()

        # Button that lets the user take a snapshot
        self.btn_snapshot = tkinter.Button(window,
                                           text="Snapshot",
                                           width=50,
                                           command=self.snapshot,
                                           font=self.customFont)
        self.btn_snapshot.pack(anchor=tkinter.CENTER, expand=True)

        # Fill the whole screen, anchored at the top-left corner.
        self.window.geometry("%dx%d+%d+%d" %
                             (self.screenWidth, self.screenHeight, 0, 0))

        # After it is called once, the update method will be automatically
        # called every `delay` milliseconds.
        self.delay = 15
        self.update()

        self.window.mainloop()

    def snapshot(self):
        """Save the current frame as a timestamped JPEG in the working dir."""
        # Get a frame from the video source
        ret, frame = self.vid.get_frame()

        if ret:
            cv2.imwrite("frame-" + time.strftime("%d-%m-%Y-%H-%M-%S") + ".jpg",
                        cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))

    def update(self):
        """Display the current frame, run frame processing, and reschedule.

        Bug fix: the original processed `frame` even when the capture
        failed (``ret`` false), which would crash on a bad frame; all
        processing is now guarded by ``ret``.
        """
        # Get a frame from the video source
        ret, frame = self.vid.get_frame()

        if ret:
            self.photo = PIL.ImageTk.PhotoImage(
                image=PIL.Image.fromarray(frame))
            self.canvas.create_image(0, 0, image=self.photo, anchor=tkinter.NW)

            subtractedBack = self.vidProcessing.subtractBack(frame)
            gaussianBlurred = self.vidProcessing.gaussianBlur(frame)
            self.vidProcessing.mask(frame, subtractedBack)

        self.window.after(self.delay, self.update)
Exemplo n.º 10
0
Arquivo: App.py Projeto: avesk/RSCS
from flask import Flask, render_template, Response, request, jsonify
from werkzeug.contrib.cache import SimpleCache

import cv2 as cv
import importlib
import sys
import time
import json

from VideoCapture import VideoCapture
from Commands import Commands
from command_map import command_map

cache = SimpleCache()
# Single Flask application instance.  The original created Flask(__name__)
# twice; the second assignment discarded the first instance, which is a
# latent source of lost route registrations.
app = Flask(__name__)
Camera = VideoCapture()
commands = Commands()


@app.route('/')
def index():
    """Serve the control page, or a busy page when another client owns the bot.

    Access is granted when no client is active yet, or when the requester is
    the currently active client (tracked by remote address in the cache).
    """
    is_active = cache.get('active')
    print(f'is_active: {is_active}')
    # `is None` is the idiomatic (and identity-correct) null test.
    if is_active == request.remote_addr or is_active is None:
        return render_template('index.html')
    return render_template('bot_in_use.html')

Exemplo n.º 11
0
class ImageEnhancement(Cmd):
    """Interactive shell (cmd.Cmd) for tuning a video's look.

    The ``adjust`` command opens a video, shows OpenCV trackbars for each
    adjuster, renders the adjusted frames to an output video next to the
    source and persists the trackbar settings as JSON under adjustData/.
    """

    intro = '''====Image enhancement, designed by "Searching Center, Energysh"====
    Type help or ? to list commands.
    '''

    def __init__(self):
        """Initialize all state to None; `adjust` fills it in later."""
        super(ImageEnhancement, self).__init__()

        self.filePath = None

        self.outputAdjustData = None
        self.outputAdjustDataPath = None
        self.outputVideoData = None
        self.outputVideoDataPath = None

        self.videoWriter = None

        self.videoControl = None

        self.adjusters = None

        self.windowName = "AdjustWindow"

        self.videoCapture = None

        self.renderer = None

    def do_adjust(self, argv):
        '''input 3 parameter: filepath adjust_name video_status
            filepath,
            adjust name(snow_scene, forest_scene, asian, white, black... ),
            video status:(Playing, Pause)
        '''

        self.parameterOperation(argv)

        self.adjusterInit()

        self.adjustWindowSetting()

        self.trackbarSetting()

        self.videoOutputSetting()

        self.loopRun()

        self.videoOutput()

        self.unInit()

    # Sharpness, saturation and brightness adjuster objects
    def adjusterInit(self):
        """Build the adjuster list, restoring saved settings when available."""

        # print("-------" + self.outputAdjustDataPath)

        self.adjusters = []
        self.renderer = Renderer()

        print(self.outputAdjustDataPath)
        # Read the settings file, if it exists
        if os.path.exists(self.outputAdjustDataPath):
            with open(self.outputAdjustDataPath, "r") as f:
                self.outputAdjustData = f.read()
                adjustDataDict = json.loads(self.outputAdjustData)
                # print(adjustDataDict)
                # outer loop run only once...
                for adjustItem in adjustDataDict[self.outputAdjustDataPath]:
                    for key in adjustItem:
                        d = adjustItem[key]
                        self.adjusters.append(
                            AdjusterFactory.createAdjuster(key, d))

        else:
            # NOTE(review): this len() call has no effect — looks like a
            # leftover debug statement; confirm before removing.
            len(self.adjusters)
            for adjusterName in AdjusterFactory.defaultAdjustersNames:
                self.adjusters.append(
                    AdjusterFactory.createAdjuster(adjusterName))
            # print(len(self.adjusters))

    def adjustWindowSetting(self):
        """Create and size the trackbar window with a placeholder image."""
        cv2.namedWindow(self.windowName)
        cv2.resizeWindow(self.windowName, (400, 512))
        cv2.imshow(self.windowName, np.zeros((10, 512, 3), np.uint8))

    def parameterOperation(self, argv):
        """Parse ``filepath adjust_name video_status`` and set up state."""

        parameters = argv.split(' ')

        # Only a well-formed, non-exit three-token command is processed.
        if parameters and parameters[0] != "exit" and len(parameters) == 3:
            self.filePath = parameters[0]

            self.outputVideoDataPath = self.filePath + ".mp4"

            self.videoCapture = VideoCapture(self.filePath)
            # adjustment output settings
            self.outputAdjustDataPath = "adjustData/" + parameters[1] + ".txt"
            print(self.outputAdjustDataPath)

            self.videoControl = VideoControl()

            if parameters[2] == "Play":
                self.videoControl.set_videoStatus(VideoStatus['Playing'].value)
            elif parameters[2] == "Pause":
                self.videoControl.set_videoStatus(VideoStatus['Paused'].value)

    def do_exit(self, arg):
        'Stop run'
        print('Stop running')
        # self.close()
        return True

    # trackball control
    def trackbarSetting(self):
        """Register trackbars for every adjuster, the video control and renderer."""
        # trackbar setup
        for adjuster in self.adjusters:
            # print(type(adjuster))
            adjuster.createTrackerBar(self.windowName)

        self.videoControl.createTrackerBar(self.windowName)

        self.renderer.createTrackerBar(self.windowName)

    def videoOutputSetting(self):
        """Create the output VideoWriter matching the source fps and size.

        NOTE(review): MJPG fourcc inside an .mp4 container may fail on some
        OpenCV backends — confirm the combination works in this environment.
        """
        # print(self.outputVideoDataPath)
        # print((self.videoCapture.get_size()[
        #       0], self.videoCapture.get_size()[1]))
        self.videoWriter = cv2.VideoWriter(
            self.outputVideoDataPath, cv2.VideoWriter_fourcc(*'MJPG'),
            self.videoCapture.get_fps(),
            (self.videoCapture.get_size()[0], self.videoCapture.get_size()[1]))

    def loopRun(self):
        """Preview loop: apply the adjusters to each frame and write it out."""

        ref, frame = self.videoCapture.read()

        # NOTE(review): `middle` is unused below — leftover?
        height, width, _ = np.shape(frame)
        middle = width // 2

        # lasttime

        while True:
            bgr_image = frame

            resultImage = self.renderer.do_Rendering(bgr_image, self.adjusters)

            self.videoWriter.write(resultImage)

            cv2.imshow("image", resultImage)

            # Space pauses until any key is pressed.
            if (cv2.waitKey(1) & 0xFF == ord(' ')):
                cv2.waitKey(0)

            # 'q' quits the preview loop.
            if cv2.waitKey(20) & 0xFF == ord('q'):
                break

            # Advance to the next frame only while the video is playing.
            if self.videoCapture.isOpened(
            ) and self.videoControl.get_videoStatus(
            ) == VideoStatus.Playing.value:
                ref, frame = self.videoCapture.read()
                if not ref:
                    self.videoControl.set_videoStatus(VideoStatus.Quit.value)
                    break
            if self.videoControl.get_videoStatus() == VideoStatus.Quit.value:
                break
            else:
                continue

    def videoOutput(self):
        """Persist every adjuster's settings as JSON keyed by the settings path."""
        adjusterDataDict = []

        for adjusterObj in self.adjusters:
            adjusterDataDict.append(adjusterObj.getData())

        with open(self.outputAdjustDataPath, "w") as f:
            f.write(json.dumps({self.outputAdjustDataPath: adjusterDataDict}))

    def unInit(self):
        """Release the writer and capture and close all OpenCV windows."""
        self.videoWriter.release()
        self.videoCapture.release()
        cv2.destroyAllWindows()
Exemplo n.º 12
0
import io
from tendo import singleton
from PIL import Image

from VideoCapture import VideoCapture
from ROI import ROI
from GUI import GUI

# Ensure only one instance of the tool runs at a time.
me = singleton.SingleInstance()

videoCapture = VideoCapture()

gui = GUI('LightMouseGun')

start_capture = False
show_preview = False
# The nine ROI anchor positions, row-major from top-left.
positions = [
    'top_left', 'top_center', 'top_right', 'middle_left', 'middle_center',
    'middle_right', 'bottom_left', 'bottom_center', 'bottom_right'
]
roi = ROI(positions[7])  #bottom_center

# Event loop: poll the GUI without blocking.
while True:
    button, values = gui.window.Read(timeout=0)

    # Bug fix: identity comparison (`is`) against a string literal is
    # implementation-dependent and raises SyntaxWarning on CPython 3.8+;
    # compare by value instead.
    if button == 'START':
        videoCapture.prepare_camera()
        if not videoCapture.cam_is_available():
            gui.popup('No camera detected')
            break
        videoCapture.start()
Exemplo n.º 13
0
"""
Webcam preview demo.

1. Capture camera image
2. Resize image and show
3. Add information for image
4. Detect the face and draw a rectangle around it

IMPORTANT: If the camera fails on macOS, type "sudo killall VDCAssistant" in the terminal

"""

import cv2
from VideoCapture import VideoCapture

video_capture = VideoCapture()

# Preview loop: grab and display frames until 'q' is pressed.
while True:
    video_capture.read()

    cv2.imshow('Capturing from camera', video_capture.get_numpay_frame())

    # Wait for 100 milliseconds to capture the image
    pressed_key = cv2.waitKey(100)

    # Break the loop if q is pressed
    if pressed_key == ord('q'):
        break

# Release the camera handle and close the preview window.
video_capture.release()
cv2.destroyAllWindows()
Exemplo n.º 14
0
 def setupVideoCapture(self):
     """Open the video source and create a canvas sized to its frames."""
     self.vid = VideoCapture(self.video_source)
     self.canvas = tkinter.Canvas(self.middleFrame,
                                  width=self.vid.width,
                                  height=self.vid.height)
     self.canvas.pack(side=tkinter.LEFT)
Exemplo n.º 15
0
class ASLRecognizerApp:
    """Tkinter app that translates ASL hand signs from a live video feed.

    Every 8th frame, the detected bounding box is cropped to a square and
    passed to the model; predicted letters accumulate in a text display
    ("space" and "del" predictions are handled specially).
    """

    def __init__(self, window, window_title, model, video_source=0):
        """Build the UI and start the update loop (blocks in mainloop)."""
        self.window = window
        self.window.title(window_title)
        self.model = model
        self.frame_iter = 0
        self.max_frame_iter = 10000  # cap so the frame counter never grows unbounded
        self.video_source = video_source

        # Adding button to snap
        self.btn_snapshot = tkinter.Button(window,
                                           text="Snapshot",
                                           width=50,
                                           command=self.snap)
        self.btn_snapshot.pack(anchor=tkinter.N, expand=True)

        self.middleFrame = tkinter.Frame(self.window)

        #Setting up the video capture
        self.setupVideoCapture()

        #Setting up the image documentation
        ASLDoc = self.loadImage("assets/ASLAlphabet.jpg", 700)
        self.labelAlphabet = tkinter.Label(self.middleFrame, image=ASLDoc)
        self.labelAlphabet.pack(side=tkinter.LEFT)

        self.middleFrame.pack()

        # Adding button to clear the text
        self.btn_snapshot = tkinter.Button(window,
                                           text="Clear",
                                           width=20,
                                           command=self.clearText)
        self.btn_snapshot.pack(anchor=tkinter.S)

        # Setting up the message display
        self.message = ""
        self.textDisplay = tkinter.Text(window)
        self.textDisplay.pack(anchor=tkinter.S, expand=True)

        # Refresh period in milliseconds for update().
        self.delay = 2
        self.update()

        self.window.mainloop()

    def setupVideoCapture(self):
        """Open the video source and create a canvas sized to its frames."""
        self.vid = VideoCapture(self.video_source)
        self.canvas = tkinter.Canvas(self.middleFrame,
                                     width=self.vid.width,
                                     height=self.vid.height)
        self.canvas.pack(side=tkinter.LEFT)

    def loadImage(self, path, height):
        """Load *path*, scale to *height* keeping aspect ratio, return a PhotoImage."""
        image = PIL.Image.open(path)
        imwidth, imheight = image.size
        imageRatio = imwidth / imheight
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use
        # Image.LANCZOS) — confirm the pinned Pillow version supports it.
        image = image.resize((int(imageRatio * height), height),
                             PIL.Image.ANTIALIAS)
        image = PIL.ImageTk.PhotoImage(image)
        return image

    def setMessageDisplay(self):
        """Replace the text widget contents with the current message."""
        self.textDisplay.delete(1.0, "end")
        self.textDisplay.insert(1.0, self.message)

    def update(self):
        """Draw the next frame, run recognition every 8th frame, reschedule."""
        ret, frame, boundingBox = self.vid.get_frame()
        [X, Y, H, W] = boundingBox
        if ret:
            #display the rectangle to the frame
            start_point = (X, Y)
            end_point = (int(X + max(W, H)), int(Y + max(W, H)))
            frame = cv2.rectangle(frame, start_point, end_point, (0, 0, 255),
                                  2)
            if (self.frame_iter % 8 == 0):
                # Only classify when the box is meaningful: non-degenerate
                # and not covering the whole frame.
                if ((H != 0 or W != 0)
                        and (X != 0 and Y != 0 and H != self.vid.height
                             and W != self.vid.width)):
                    #Preventing negative X and Y
                    X = max(0, X)
                    Y = max(0, Y)

                    #Making the snaped image square and take into account the borders
                    cropSize = min(max(H, W), self.vid.width - X,
                                   self.vid.height - Y)
                    end_point = (int(X + cropSize), int(Y + cropSize))

                    #Draws a red rectangle when it is snaping
                    frame = cv2.rectangle(frame, start_point, end_point,
                                          (255, 0, 0), 3)
                    self.gestureDetection(frame[Y:int(Y + cropSize),
                                                X:int(X + cropSize)])
            self.photo = PIL.ImageTk.PhotoImage(
                image=PIL.Image.fromarray(frame))
            self.canvas.create_image(0, 0, anchor=tkinter.NW, image=self.photo)
            self.frame_iter += 1
            if (self.frame_iter > self.max_frame_iter):
                self.frame_iter = 0

        self.window.after(self.delay, self.update)

    def gestureDetection(self, frame):
        """Classify *frame* and fold the predicted letter into the message."""
        letter = getModelPrediction(self.model, frame)
        if (letter == "space"):
            self.message += " "
        elif (letter == "del"):
            # Drop the last character, if any.
            if (len(self.message) > 0):
                self.message = self.message[0:len(self.message) - 1]
        elif (len(letter) == 1):
            self.message += letter
        self.setMessageDisplay()

    def snap(self):
        """Run recognition once on the full current frame."""
        ret, frame, boundingBox = self.vid.get_frame()
        if ret:
            self.gestureDetection(frame)

    def clearText(self):
        """Reset the accumulated message and refresh the display."""
        self.message = ""
        self.setMessageDisplay()
Exemplo n.º 16
0
class App(Frame):
    """Tkinter GUI that previews a camera feed, detects coloured shapes in
    it and assembles a fixed-width configuration message for three sorting
    bins ("spremnici").

    Message layout (31 characters): ``'A' + 9 digits, 'B' + 9 digits,
    'C' + 9 digits, '#'``.  Per bin the nine digits are
    ``[oblik][boja][min mass: 3 digits][max mass: 4 digits]``.
    """

    def __init__(self, root, **kwargs):
        """Build the widget tree, open the camera and start the update loop.

        root   -- the Tk root window the frames are packed into.
        kwargs -- forwarded to the Frame base class.
        """
        Frame.__init__(self, root, **kwargs)
        # 'A'/'B'/'C' section headers + '#' terminator; digit slots zeroed.
        self.message_list = [
            'A', 0, 0, 0, 0, 0, 0, 0, 0, 0, 'B', 0, 0, 0, 0, 0, 0, 0, 0, 0,
            'C', 0, 0, 0, 0, 0, 0, 0, 0, 0, '#'
        ]
        # Layout containers: three bin panels on top, terminal + video below.
        self.top_frame = Frame(root)
        self.sub_frame_A = Frame(self.top_frame)
        self.sub_frame_B = Frame(self.top_frame)
        self.sub_frame_C = Frame(self.top_frame)
        self.bottom_frame = Frame(root)

        self.flag = 0

        # Open video source (device 0).
        self.Video = VideoCapture(0)

        # BUGFIX: the original wrote `self.label_X = Label(...).grid(...)`,
        # and grid() returns None, so the attributes held None instead of
        # the widgets.  Create each label first, then grid it.
        self.label_A = Label(self.top_frame,
                             text=DASH_COUNT * "-" + " Spremnik A " +
                             DASH_COUNT * "-",
                             bd=2,
                             relief="solid",
                             fg='white',
                             bg='grey')
        self.label_A.grid(row=0, column=0)
        self.label_B = Label(self.top_frame,
                             text=DASH_COUNT * '-' + " Spremnik B " +
                             DASH_COUNT * "-",
                             bd=2,
                             relief="solid",
                             fg='white',
                             bg='grey')
        self.label_B.grid(row=0, column=1)
        self.label_C = Label(self.top_frame,
                             text=DASH_COUNT * '-' + " Spremnik C " +
                             DASH_COUNT * "-",
                             bd=2,
                             relief="solid",
                             fg='white',
                             bg='grey')
        self.label_C.grid(row=0, column=2)
        self.spremnik_A = Spremnik(root, self.sub_frame_A)
        self.spremnik_B = Spremnik(root, self.sub_frame_B)
        self.spremnik_C = Spremnik(root, self.sub_frame_C)
        # Create a canvas that can fit the above video source size.
        self.canvas = Canvas(self.bottom_frame,
                             width=self.Video.width,
                             height=self.Video.height)
        self.terminal = Text(self.bottom_frame, height=20, width=40)
        self.terminal_scrollbar = Scrollbar(self.bottom_frame,
                                            command=self.terminal.yview)
        self.terminal.config(yscrollcommand=self.terminal_scrollbar.set)
        self.send_btn = Button(self.bottom_frame,
                               text="SEND",
                               command=self.Send,
                               width=8)
        # Gridding and packing.
        self.sub_frame_A.grid(row=1, column=0)
        self.sub_frame_B.grid(row=1, column=1)
        self.sub_frame_C.grid(row=1, column=2)
        self.terminal.grid(row=0, column=0)
        self.terminal_scrollbar.grid(row=0, column=1, sticky='ns')
        self.send_btn.grid(row=0, column=2, padx=50)
        self.canvas.grid(row=0, column=3)
        self.top_frame.pack()
        Label(root, text=270 * '-').pack()
        self.bottom_frame.pack()

        # Refresh period in milliseconds for the update loop.
        self.delay = 4
        self.update()

    def update(self):
        """Grab a frame, run detection, redraw the canvas and reschedule."""
        self.frame = self.Video.Get_Frame()
        self._Detection()
        # Keep the PhotoImage on self so Tkinter does not garbage-collect it.
        self.photo = ImageTk.PhotoImage(image=Image.fromarray(self.frame))
        self.canvas.create_image(0, 0, image=self.photo, anchor=NW)
        self.after(self.delay, self.update)

    def Send(self):
        """Validate and assemble the message, then echo it to the terminal.

        Shows a warning dialog when Prepare_Message rejects the mass range.
        """
        try:
            self.Prepare_Message()
            self.message = ''.join(str(e) for e in self.message_list)
            self.terminal.insert(END, self.message + '\n')
            self.terminal.see('end')
        except ValueError:
            messagebox.showwarning("Warning!",
                                   "Molim ispravan unos mase (0-1000 grama)")

    @staticmethod
    def _pack_digits(value, width):
        """Return the last *width* decimal digits of *value*, most
        significant digit first (e.g. _pack_digits(123, 3) -> [1, 2, 3];
        values with more digits are silently truncated, as before)."""
        digits = []
        for _ in range(width):
            digits.append(value % 10)
            value = int(value / 10)
        return digits[::-1]

    def Prepare_Message(self):
        """Fill self.message_list from the three bin panels.

        Raises ValueError when mass filtering is enabled but the entered
        range is empty, inverted, or outside 0-1000 grams.
        """
        self.spremnici = [self.spremnik_A, self.spremnik_B, self.spremnik_C]
        for offset, spremnik in enumerate(self.spremnici):
            base = 10 * offset
            if (spremnik.boja.get() == 0 and spremnik.oblik.get() == 0
                    and spremnik.toggle_btn_masa['text'] == 'OFF'):
                # Bin unused: zero out its nine payload digits.
                for i in range(1 + base, 10 + base):
                    self.message_list[i] = 0
            else:
                self.message_list[1 + base] = spremnik.oblik.get()
                self.message_list[2 + base] = spremnik.boja.get()
                if spremnik.toggle_btn_masa['text'] == 'ON':
                    if (len(spremnik.entry_masa_min.get()) == 0
                            or len(spremnik.entry_masa_max.get()) == 0):
                        raise ValueError
                    if (spremnik.masa_max.get() < spremnik.masa_min.get()
                            or spremnik.masa_max.get() > 1000
                            or spremnik.masa_min.get() < 0):
                        raise ValueError
        # NOTE(review): mass digits below are written for every bin, even
        # ones zeroed above — presumably their mass vars default to 0;
        # confirm against the Spremnik panel.
        # Minimum mass: three digits at indices 3-5 of each bin.
        for c, spremnik in enumerate(self.spremnici):
            self.message_list[3 + c * 10:6 + c * 10] = \
                self._pack_digits(spremnik.masa_min.get(), 3)
        # Maximum mass: four digits at indices 6-9 of each bin.
        for c, spremnik in enumerate(self.spremnici):
            self.message_list[6 + c * 10:10 + c * 10] = \
                self._pack_digits(spremnik.masa_max.get(), 4)

    def _Detection(self):
        """Find coloured shapes in the current frame, annotate them, and log
        a detection once the same shape was seen MIN_DETECT times.

        Annotates self.frame in place and writes the intermediate masks into
        the module-level `mask`/`median` buffers, as the original code did.
        """
        self.hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
        # Threshold every configured colour range and despeckle the mask.
        for n, (lower, upper) in enumerate(colors):
            mask[n] = cv2.inRange(self.hsv, lower, upper)
            median[n] = cv2.medianBlur(mask[n], 9)

        # Per-colour annotation data, indexed like the `colors` table:
        # (display name, BGR draw colour, per-shape counter list).
        specs = [
            ('Crvena', (0, 0, 255), crveni_br),
            ('Zelena', (0, 255, 0), zeleni_br),
            ('Plava', (255, 0, 0), plavi_br),
            ('Zuta', (255, 255, 0), zuti_br),
        ]
        for n in range(min(COLORS_NUM, len(specs))):
            name, bgr, counters = specs[n]
            contours, _ = cv2.findContours(median[n], cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                if cv2.contourArea(cnt) <= POVRSINA:
                    continue  # too small to be a real object
                approx = cv2.approxPolyDP(cnt,
                                          0.03 * cv2.arcLength(cnt, True),
                                          True)
                x = approx.ravel()[0]
                y = approx.ravel()[1]
                cv2.drawContours(self.frame, [approx], -1, bgr, 3)
                # Map the vertex count to a shape name and counter slot.
                verts = len(approx)
                if verts == 3:
                    shape, slot = 'piramida', 0
                elif verts == 4:
                    shape, slot = 'kocka', 1
                elif verts >= 7:
                    shape, slot = 'kugla', 2
                else:
                    continue  # 5/6 vertices: outlined but not classified
                label = name + ' ' + shape
                cv2.putText(self.frame, label, (x, y),
                            cv2.FONT_HERSHEY_COMPLEX, 1, bgr)
                counters[slot] += 1
                if counters[slot] >= MIN_DETECT:
                    # Enough sightings: report once, then reset all counters.
                    Clear_Color_Counters()
                    self.terminal.insert(END, label + '\n')
                    self.terminal.see('end')
                    self.flag = 0
Exemplo n.º 17
0
    def __init__(self, root, **kwargs):
        """Build the widget tree, open the camera and start the update loop.

        root   -- the Tk root window the frames are packed into.
        kwargs -- forwarded to the Frame base class.
        """
        Frame.__init__(self, root, **kwargs)
        # 'A'/'B'/'C' section headers + '#' terminator; digit slots zeroed.
        self.message_list = [
            'A', 0, 0, 0, 0, 0, 0, 0, 0, 0, 'B', 0, 0, 0, 0, 0, 0, 0, 0, 0,
            'C', 0, 0, 0, 0, 0, 0, 0, 0, 0, '#'
        ]
        ''' making some frames '''
        # Layout containers: three bin panels on top, terminal + video below.
        self.top_frame = Frame(root)
        self.sub_frame_A = Frame(self.top_frame)
        self.sub_frame_B = Frame(self.top_frame)
        self.sub_frame_C = Frame(self.top_frame)
        self.bottom_frame = Frame(root)

        self.flag = 0

        # open video source
        self.Video = VideoCapture(0)

        # NOTE(review): grid() returns None, so label_A/B/C end up holding
        # None rather than the Label widgets; harmless while the labels are
        # never referenced again, but worth fixing if they ever are.
        self.label_A = Label(self.top_frame,
                             text=DASH_COUNT * "-" + " Spremnik A " +
                             DASH_COUNT * "-",
                             bd=2,
                             relief="solid",
                             fg='white',
                             bg='grey').grid(row=0, column=0)
        self.label_B = Label(self.top_frame,
                             text=DASH_COUNT * '-' + " Spremnik B " +
                             DASH_COUNT * "-",
                             bd=2,
                             relief="solid",
                             fg='white',
                             bg='grey').grid(row=0, column=1)
        self.label_C = Label(self.top_frame,
                             text=DASH_COUNT * '-' + " Spremnik C " +
                             DASH_COUNT * "-",
                             bd=2,
                             relief="solid",
                             fg='white',
                             bg='grey').grid(row=0, column=2)
        # One control panel per bin, hosted in its sub-frame.
        self.spremnik_A = Spremnik(root, self.sub_frame_A)
        self.spremnik_B = Spremnik(root, self.sub_frame_B)
        self.spremnik_C = Spremnik(root, self.sub_frame_C)
        # Create a canvas that can fit the above video source size
        self.canvas = Canvas(self.bottom_frame,
                             width=self.Video.width,
                             height=self.Video.height)
        self.terminal = Text(self.bottom_frame, height=20, width=40)
        self.terminal_scrollbar = Scrollbar(self.bottom_frame,
                                            command=self.terminal.yview)
        self.terminal.config(yscrollcommand=self.terminal_scrollbar.set)
        self.send_btn = Button(self.bottom_frame,
                               text="SEND",
                               command=self.Send,
                               width=8)
        ''' gridding and packing '''
        self.sub_frame_A.grid(row=1, column=0)
        self.sub_frame_B.grid(row=1, column=1)
        self.sub_frame_C.grid(row=1, column=2)
        self.terminal.grid(row=0, column=0)
        self.terminal_scrollbar.grid(row=0, column=1, sticky='ns')
        self.send_btn.grid(row=0, column=2, padx=50)
        self.canvas.grid(row=0, column=3)
        self.top_frame.pack()
        Label(root, text=270 * '-').pack()
        self.bottom_frame.pack()

        # Refresh period in milliseconds, then kick off the update loop.
        self.delay = 4
        self.update()
Exemplo n.º 18
0
def test_online(filename):
    """Run online facial-landmark detection and tracking on a video source.

    filename -- source handed to VideoCapture (device index or file path).

    Shows an annotated preview window with a median-fps overlay; press ESC
    to quit.  Relies on the module-level `engine` (ffp_detect / ffp_track)
    and the `draw_str` helper.
    """
    cap = VideoCapture(filename)

    retval = cap.isOpened()
    if not retval:
        print("Cannot open camera/video!")
    else:  # start the video capturing thread
        cap.start()
    ffp_flag = True  # force a full detection on the first frame
    fps_queue = []  # sliding window of per-frame fps samples
    fps_queue_size = 20
    while retval:
        # Capture frame-by-frame
        tStart = time.time()
        ret, img = cap.read()
        if not ret:
            break
        # mirror image
        img = np.fliplr(img)
        # Landmark detection/tracking: re-detect whenever the previous
        # iteration reported a non-zero flag, otherwise track old points.
        if ffp_flag > 0:
            pts, ffp_flag, confidence = engine.ffp_detect(img)
        else:
            pts, ffp_flag, confidence = engine.ffp_track(img, pts)
        tEnd = time.time()
        if np.sum(pts) == 0:  # failed detection, try the next frame
            continue
        ## uncomment for eye cropping
        #eye_left = crop_eye(img, pts[19, :], pts[22, :])
        #eye_right = crop_eye(img, pts[25, :], pts[28, :])
        #gaze = f(eye_left) # potential gaze estimation
        vis = img.copy()
        if ffp_flag == 0:  # success: draw each landmark as a green dot
            num_pts = pts.shape[0]
            if vis.ndim < 3:
                vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2RGB)
            for idx in np.arange(num_pts):
                cv2.circle(
                    vis, (int(pts[idx, 0].round()), int(pts[idx, 1].round())),
                    2, (0, 255, 0), -1)

        fps = 1.0 / (tEnd - tStart)
        if len(fps_queue) < fps_queue_size:
            fps_queue.append(fps)
        else:  # window full: shift left and append the newest sample
            fps_queue[:-1] = fps_queue[1:]
            fps_queue[-1] = fps
        draw_str(
            vis, (20, 20), 'fps: %3.1f, flag = %d, confidence = %4.2f' %
            (np.median(fps_queue), ffp_flag, confidence))
        cv2.imshow('ffp_detection', vis)
        # BUGFIX: the original `0xFF & cv2.waitKey(5) == 27` parsed as
        # `0xFF & (waitKey == 27)` because `==` binds tighter than `&`,
        # defeating the low-byte mask.  Parenthesize the mask explicitly.
        if (cv2.waitKey(5) & 0xFF) == 27:
            break
    cap.release()
    cv2.destroyAllWindows()