Example No. 1
class CameraController(object):
    """
    Summary => will control the camera for the robotic artist.

    Description => will control the camera, for the input and output
                    proccess's. This will include being able to create
                    and stop a thread that is produced during the process.

    args => None

    None => None
    """
    def __init__(self, ui, update_method):
        """
        Summary => intialize the camera.

        Description => will intialize the camera_controller and will connect
                the camera to the application.

        args => None

        None => None
        """
        self.ui = ui
        self.update_method = update_method

    def start_video_capture(self):
        """
        Summary => starts the video capture and sets the fps for this.

        Description => will start the video capture. By making a thread that
                will continusly update the gui. On what is occouring.

        args => None

        None => None
        """
        # intialize the video_capture object, with the set fps
        self.video_capture = VideoCapture(self.ui, self.update_method)
        # change static boolean to False
        VideoCapture.stopped = False
        # start the thread to capture the video.
        self.video_capture.start()

    def stop_video_capture(self):
        """
        Summary => stops the video_capture.

        Description => will stop the video capture by changing the static
                    boolean VideoCapture.stooped to ture.

                    video to be captured in.
        args => None

        None => None
        """
        # changes static boolean to true, this stops the thread running in the
        # the background
        VideoCapture.stopped = True
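
A minimal usage sketch for the controller above; ui and update_method are placeholders that the host application would supply.

# ui and update_method are assumed to come from the host application (placeholders here)
camera = CameraController(ui, update_method)
camera.start_video_capture()    # spawns the VideoCapture thread
# ... application runs ...
camera.stop_video_capture()     # sets VideoCapture.stopped to True, ending the thread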
Example No. 2
    def get_current_face_encoding(self):
        """Get the current face_encoding."""
        vc = VideoCapture()
        vc.read_frame()
        face_encodings = vc.get_face_encodings()
        # keep reading frames until exactly one face is detected
        while len(face_encodings) != 1:
            vc.read_frame()
            face_encodings = vc.get_face_encodings()
        self.face_encoding = face_encodings[0]
        vc.destroy_instance()
Example No. 3
    def __init__(self, uri, transform=None, queue_size=128, gpu_id=None, quiet=True, read_type='numpy'):
        # initialize the video capture along with the boolean
        # used to indicate if the thread should be stopped or not
        self.cap = VideoCapture(uri, gpu_id=gpu_id, quiet=quiet)
        self.stopped = False
        self.transform = transform
        self.read_type = read_type

        # initialize queue and thread
        self.Q = Queue(maxsize=queue_size)
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True
Example No. 4
    def __init__(self):
        QWidget.__init__(self)

        # loading ui from xml
        uic.loadUi(os.path.join(DIRPATH, 'app.ui'), self)

        # button event handlers
        self.btnStartCaptureForVideoAnalysis.clicked.connect(
                self.start_capture_for_video_analysis)
        self.btnStopCaptureForVideoAnalysis.clicked.connect(
                self.stop_capture_for_video_analysis)

        self.btnChooseClassifierXML.clicked.connect(self.choose_classifier_file)

        self.btnChooseImage.clicked.connect(self.choose_image_for_analysis)

        self.setup_tray_menu()

        # add camera ids
        for i in range(0, 11):
            self.cboxCameraIds.addItem(str(i))
            self.cboxCameraIds1.addItem(str(i))

        # setting up handlers for menubar actions
        self.actionAbout.triggered.connect(self.about)
        self.actionExit.triggered.connect(qApp.quit)
        self.actionPreferences.triggered.connect(self.show_preferences)

        self.img_widget_vid_analysis = ImageWidget()

        self.hlayoutVideoAnalysis.addWidget(self.img_widget_vid_analysis)

        self.img_widget_face_training = ImageWidget()
        self.hlayoutFaceTrainingImg.addWidget(self.img_widget_face_training)

        self.img_widget_img_analysis = ImageWidget()
        self.hlayoutImageAnalysis.addWidget(self.img_widget_img_analysis)

        self.vid_capture = VideoCapture()
        self.vid_capture.got_image_data_from_camera.connect(
                self.process_image_data_from_camera)

        self.highlight_faces = self.chkHighlightFaces.isChecked()
        self.chkHighlightFaces.stateChanged.connect(self.highlight_faces_checkbox_changed)
        self.chckGrayscale.stateChanged.connect(self.grayscale_checkbox_changed)

        # face trainer dataset browser btn handler
        self.btnBrowseDatasetForFaceTrainer.clicked.connect(self.browse_dataset_for_face_trainer)
        self.btnBrowseClassifierForFaceTrainer.clicked.connect(self.browse_classifier_file_for_face_trainer)
        self.btnStartFaceTrainer.clicked.connect(self.start_face_trainer)
Example No. 5
def main():
    def roiSelectedEvent(roi, imageRoi, wnd, image):
        if roi is None:
            return
        predictions = predict_on_image(model, None, imageRoi, 0.95)
        cv2.imshow('roi', predictions)

    model = getModel()
    video = VideoCapture(video_sources.video_2)
    vc = VideoController(10, 'pause')
    wnd = Wnd('video', roiSelectedEvent=roiSelectedEvent)
    for frame in video.frames():
        cv2.destroyWindow('roi')
        wnd.imshow(frame)
        if vc.wait_key() == 27: break
Example No. 6
    def start_video_capture(self):
        """
        Summary => starts the video capture and sets the fps for this.

        Description => will start the video capture. By making a thread that
                will continusly update the gui. On what is occouring.

        args => None

        None => None
        """
        # intialize the video_capture object, with the set fps
        self.video_capture = VideoCapture(self.ui, self.update_method)
        # change static boolean to False
        VideoCapture.stopped = False
        # start the thread to capture the video.
        self.video_capture.start()
Example No. 7
def main():
    # dir_name = '/lfs/1/ddkang/blazeit/data/svideo/jackson-town-square/2017-12-14'
    # index_fname = '/lfs/1/ddkang/blazeit/data/svideo/jackson-town-square/2017-12-14.json'
    dir_name = '/lfs/1/ddkang/blazeit/data/svideo/jackson-town-square/short'
    index_fname = dir_name + '.json'
    cap = VideoCapture(dir_name, index_fname)
    '''cap.set(cv2.CAP_PROP_POS_FRAMES, 300)
    ret, frame = cap.read()
    cap2 = cv2.VideoCapture(os.path.join(dir_name, '3.mp4'))
    _, f2 = cap2.read()'''
    i = 0
    ret, frame = cap.read()
    while ret:
        i += 1
        ret, frame = cap.read()
    print(i)

    # f2 is only defined in the commented-out block above, so guard this check
    # print(np.array_equal(frame, f2))
Example No. 8
    def __init__(self, env, video_device):
        try:
            self.API_ENDPOINT = env['ApiEndpoint']
            self.FACE_AREA_THRESHOLD = env['FaceAreaThreshold']
            self.NAME_TTL_SEC = env['NameTtlSec']
            self.FACE_SIMILARITY_THRESHOLD = env['FaceSimilarityThreshold']
            self.COGNITO_USERPOOL_ID = env['CognitoUserPoolId']
            self.COGNITO_USERPOOL_CLIENT_ID = env['CognitoUserPoolClientId']
            self.REGION = env['Region']
        except KeyError:
            print('Invalid config file')
            raise

        self.recent_name_list = []
        self.registered_name_set = set()
        self.video_capture = VideoCapture(env, video_device)
        self.detector = Detector(env)
        self.viewer = Viewer(env)
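
The constructor above expects an env mapping with the keys it reads; a sketch with placeholder values only (the endpoint and IDs are not real, and VideoCapture/Detector/Viewer may read additional keys).

env = {
    'ApiEndpoint': 'https://example.invalid/api',      # placeholder
    'FaceAreaThreshold': 5000,                         # placeholder
    'NameTtlSec': 30,                                  # placeholder
    'FaceSimilarityThreshold': 0.8,                    # placeholder
    'CognitoUserPoolId': 'us-east-1_placeholder',      # placeholder
    'CognitoUserPoolClientId': 'placeholderclientid',  # placeholder
    'Region': 'us-east-1',
}
controller = Controller(env, video_device=0)   # class name as in Example No. 28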
Example No. 9
def main():
    feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)
    lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    video = VideoCapture(video_sources.video_2)
    wnd = CvNamedWindow('video')
    vc = VideoController(delay=50)

    prev_gray = None
    po = None
    tracking = False

    for frame in video.frames():
        if tracking:
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # calculate optical flow
            p1, st, err = cv2.calcOpticalFlowPyrLK(prev_gray, frame_gray, p0, None, **lk_params)
            p1 = p1[st == 1]
            # draw the tracks
            for pt in p1:
                a, b = pt.ravel()
                # cv2.circle expects integer point coordinates
                frame = cv2.circle(frame, (int(a), int(b)), 5, (0, 255, 0), -1)
            prev_gray = frame_gray
            p0 = p1.reshape(-1, 1, 2)

        wnd.imshow(frame)

        key = vc.wait_key()
        if key == 27:
            break
        elif not tracking and key == ord('r'):  # init tracking
            roi = cv2.selectROI('roi', frame)
            cv2.destroyWindow('roi')

            if roi is None or sum(roi) == 0:
                continue

            prev_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            p0 = cv2.goodFeaturesToTrack(prev_gray, mask=roi_mask(prev_gray, roi), **feature_params)
            if p0 is not None and len(p0) > 0:
                tracking = True

    video.release()
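
roi_mask is not defined in this snippet; a minimal sketch consistent with how it is used (cv2.goodFeaturesToTrack expects a uint8 mask of the same size as the image):

import numpy as np

def roi_mask(gray, roi):
    # Build a mask that is 255 inside the selected (x, y, w, h) rectangle, 0 elsewhere
    x, y, w, h = roi
    mask = np.zeros_like(gray, dtype=np.uint8)
    mask[y:y + h, x:x + w] = 255
    return mask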
Example No. 10
    def __init__(self, window, servo):
        self.window = window
        self.window.title(u"Monitor")

        # Set the canvas aspect ratio to 16:9.
        self.canvas = tkinter.Canvas(window,
                                     width=self.canvas_width,
                                     height=self.canvas_width * (9 / 16))
        self.canvas.grid(columnspan=4)

        self.edit_box = tkinter.Entry(window)
        self.edit_box.grid(row=1, column=0)

        self.btn_snapshot = tkinter.Button(window,
                                           text="WRITE",
                                           command=self.write_to_servo)
        self.btn_snapshot.grid(row=1, column=1)

        self.btn_move = tkinter.Button(window,
                                       text="LEFT",
                                       command=self.move_left)
        self.btn_move.grid(row=1, column=2)

        self.btn_stop = tkinter.Button(window,
                                       text="RIGHT",
                                       command=self.move_right)
        self.btn_stop.grid(row=1, column=3)

        self.btn_stop = tkinter.Button(window,
                                       text="TRACE",
                                       command=self.look_human)
        self.btn_stop.grid(row=1, column=4)

        self.human_list = HumanList(window)
        self.human_list.grid(row=0, column=4)

        self.vid = VideoCapture()
        self.servo = servo

        self.update()
        self.window.mainloop()
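
The update method called at the end of __init__ is not shown; a minimal sketch, assuming cv2 is imported, Pillow is available (from PIL import Image, ImageTk), and VideoCapture.read() returns (ret, frame) with a BGR frame.

    def update(self):
        # Poll one frame, redraw the canvas, then reschedule this method
        ret, frame = self.vid.read()
        if ret:
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.photo = ImageTk.PhotoImage(image=Image.fromarray(rgb))
            self.canvas.create_image(0, 0, image=self.photo, anchor=tkinter.NW)
        self.window.after(15, self.update)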
Example No. 11
def main():
    video = VideoCapture(video_sources.video_2)

    frame = video.read()
    backSubtractor = BackgroundSubtractorAVG(0.2, denoise(frame))

    for frame in video.frames():
        with utils.timeit_context():
            frame = denoise(frame)
            foreGround = backSubtractor.getForeground(frame)
            # Apply thresholding on the foreground and display the resulting mask
            ret, mask = cv2.threshold(foreGround, 15, 255, cv2.THRESH_BINARY)

        cv2.imshow('input', frame)
        cv2.imshow('foreground', foreGround)
        # Note: the mask is displayed as an RGB image; you can
        # display a grayscale image by converting 'foreGround' to
        # grayscale before applying the threshold.
        cv2.imshow('mask', mask)

        if cv2.waitKey(10) & 0xFF == 27:
            break

    video.release()
    cv2.destroyAllWindows()
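
denoise is not shown in this example; a plausible minimal implementation, assuming a light Gaussian blur is all that is intended (cv2 already imported):

def denoise(frame, kernel_size=(5, 5)):
    # Smooth out sensor noise before background subtraction
    return cv2.GaussianBlur(frame, kernel_size, 0)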
Example No. 12
def main():
    video = VideoCapture(video_sources.video_6)
    work_area = WorkAreaView(video_sources.video_6_work_area_markers)

    vc = VideoController(10, 'pause')
    video_wnd, = Wnd.create('video')
    # h_wnd, s_wnd, v_wnd = Wnd.create('H', 'S', 'V')
    # L_wnd, a_wnd, b_wnd = Wnd.create('L', 'a', 'b')

    frames_iter = work_area.skip_non_area(video.frames())
    for frame, _ in frames_iter:
        # hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # h_wnd.imshow(hsv[:, :, 0])
        # s_wnd.imshow(hsv[:, :, 1])
        # v_wnd.imshow(hsv[:, :, 2])

        # lab = cv2.cvtColor(frame, cv2.COLOR_BGR2Lab)
        # L_wnd.imshow(lab[:, :, 0])
        # a_wnd.imshow(lab[:, :, 1])
        # b_wnd.imshow(lab[:, :, 2])

        vis_img = utils.put_frame_pos(frame, video.frame_pos(), xy=(2, 55))
        video_wnd.imshow(vis_img)

        if vc.wait_key() == 27: break

    video.release()
Example No. 13
def main():
    video = VideoCapture(video_sources.video_2)
    workArea = WorkAreaView(video_sources.video_2_work_area_markers)

    vc = VideoController(10, 'pause')
    (video_wnd, bin_diff_wnd, gray_diff_wnd, colorDiffWnd,
     learned_BG_wnd) = Wnd.create('video', 'binary diff', 'gray diff',
                                  'color diff', 'Learned BG')
    colorAbsDiffWnd = Wnd('color_abs_diff')
    segmentedWnd = Wnd('segmented')

    segmenter = Segmenter()

    frames_iter = workArea.skip_non_area(video.frames())

    motionDetector = MotionDetector(next(frames_iter)[0], 3)
    backgroundModel = BackgroundModel(15)
    prevBackground = None
    for frame, _ in frames_iter:
        motionDetector.detect(frame)

        if motionDetector.motionEnded():
            # calc fgMask
            mask, gray_diff, color_diff, colorAbsDiff = calcForegroundMask(
                prevBackground, frame)
            # bin_diff_wnd.imshow(resize(mask, 0.5))
            bin_diff_wnd.imshow(cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR))

            # gray_diff_wnd.imshow(resize(gray_diff, .5))
            # colorDiffWnd.imshow(resize(color_diff, .5))
            # colorAbsDiffWnd.imshow(resize(colorAbsDiff, .5))
            markers, objectsCount = segmenter.segment(mask)
            segmentedWnd.imshow(
                resize(Segmenter.markersToDisplayImage(markers, objectsCount),
                       .5))

            backgroundModel = BackgroundModel(15)

        if motionDetector.isSilence():
            backgroundModel.learn(frame, foregroundMask=None)
            learned_BG_wnd.imshow(resize(backgroundModel.learned, .5))

        if motionDetector.motionStarted():
            prevBackground = backgroundModel.learned
            backgroundModel = None
            learned_BG_wnd.imshow(resize(prevBackground, .5))

        # VIS

        vis_img = motionDetector.indicateCurrentState(frame.copy())
        vis_img = utils.put_frame_pos(vis_img, video.frame_pos(), xy=(2, 55))
        video_wnd.imshow(vis_img)
        # bin_diff_wnd.imshow(resize(motionDetector.bin_diff, .5))
        # gray_diff_wnd.imshow(resize(motionDetector.gray_diff, .5))
        # VIS END

        if vc.wait_key() == 27: break

    video.release()
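
calcForegroundMask is not defined in this snippet; one plausible reconstruction, assuming it diffs the current frame against the previously learned background and returns the four values unpacked above:

def calcForegroundMask(background, frame, threshold=35):
    # Per-channel absolute difference between learned background and current frame
    colorAbsDiff = cv2.absdiff(background, frame)
    # Signed (saturated) difference, kept for visualization
    color_diff = cv2.subtract(frame, background)
    gray_diff = cv2.cvtColor(colorAbsDiff, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray_diff, threshold, 255, cv2.THRESH_BINARY)
    return mask, gray_diff, color_diff, colorAbsDiff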
Example No. 14
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--video_dir', required=True)
    parser.add_argument('--index_fname', required=True)
    parser.add_argument('--npy_fname', required=True)
    parser.add_argument('--resol', type=int, required=True)
    parser.add_argument('--base_name', default=None)
    args = parser.parse_args()

    vid_dir = args.video_dir
    index_fname = args.index_fname
    npy_fname = args.npy_fname
    resol = args.resol
    if args.base_name is None:
        crop_and_exclude = False
    else:
        crop_and_exclude = True
        from blazeit.data.video_data import get_video_data
        video_data = get_video_data(args.base_name)

    cap = VideoCapture(vid_dir, index_fname)
    data = np.zeros((cap.cum_frames[-1], resol, resol, 3), dtype=np.float32)
    for i in range(len(data)):
        if i % 1000 == 0:
            print('Processing frame', i)
        ret, frame = cap.read()
        if not ret:
            print('Something really bad happened')
            sys.exit(1)
        if crop_and_exclude:
            frame = video_data.process_frame(frame)
        data[i] = cv2.resize(frame, (resol, resol))

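    # normalize to [0, 1], then apply the standard ImageNet channel means and stds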
    data /= 255.
    data[..., :] -= [0.485, 0.456, 0.406]
    data[..., :] /= [0.229, 0.224, 0.225]

    np.save(npy_fname, data)
Example No. 15
    def display_stack(self, i):
        """
        Summary => changes the widget stack page.

        Description => will change the stacked widget view to the int given
        through i.

        This code was found on:
        https://www.tutorialspoint.com/pyqt/pyqt_qstackedwidget.htm

        If the specified page has any objects that need to be initialized,
        then that process will be run through here.

        args => i => the int for the page wanted. Ranging from 0-3

        return => None
        """
        self.ui.page_layer.setCurrentIndex(i)

        if (i == 0):
            print("starting VideoCapture")
            # set checked_boxes to false
            self.ui.check_process.setChecked(False)
            # create a thread to update the images in the gui when complete
            update = UpdateImages(self.ui)
            # connect signal
            self.connect(update, QtCore.SIGNAL("update_images(PyQt_PyObject)"),
                         self.update_images)
            # initialize the video_capture object
            self.video_capture = VideoCapture(self.ui, update)
            # connect video_capture to main gui thread
            self.connect(self.video_capture,
                         QtCore.SIGNAL("update_capture(QImage)"),
                         self.update_capture)
            self.camera.start_video_capture(self.video_capture)
        elif (i == 2):
            # set check boxes to false
            for check in self.style_check_boxes:
                check.setChecked(False)
Example No. 16
def main():
    video = VideoCapture(video_sources.video_6)
    work_area = WorkAreaView(video_sources.video_6_work_area_markers)

    vc = VideoController(10, 'pause')
    (video_wnd, bin_diff_wnd, gray_diff_wnd, frame0_diff_wnd,
     learned_BG_wnd) = Wnd.create('video', 'binary diff', 'gray diff',
                                  'diff with frame0', 'Learned BG')

    frames_iter = work_area.skip_non_area(video.frames())

    motion_detector = MotionDetector(next(frames_iter)[0], 3)
    background = BackgroundModel(motion_detector, 15)

    for frame, _ in frames_iter:
        motion_detector.detect(frame)
        if not background.done:
            background.learn()
        else:
            if motion_detector.motion_ended():
                frame0_diff = cv2.absdiff(background.learned, frame)
                gray_of_color_diff = Helper.to_gray(frame0_diff)

                frame0_diff_wnd.imshow(
                    resize(
                        np.hstack(
                            (frame0_diff, Helper.to_bgr(gray_of_color_diff))),
                        .5))

                _, binary = cv2.threshold(gray_of_color_diff, 35, 255,
                                          cv2.THRESH_BINARY)
                cv2.imshow('1 binary', resize(binary, .5))

                # VIS
        if background.done:
            learned_BG_wnd.imshow(resize(background.learned, 1))

        vis_img = motion_detector.put_current_state(frame.copy())
        vis_img = utils.put_frame_pos(vis_img, video.frame_pos(), xy=(2, 55))
        video_wnd.imshow(vis_img)
        # bin_diff_wnd.imshow(resize(motion_detector.bin_diff, .5))
        # gray_diff_wnd.imshow(resize(motion_detector.gray_diff, .5))
        # VIS END

        if vc.wait_key() == 27: break

    video.release()
Example No. 17
def main():
    video = VideoCapture(video_sources.video_6)

    diff_wnd = CvNamedWindow('diff')
    mask_wnd = CvNamedWindow('mask')
    input_wnd = CvNamedWindow('input')

    # prev_frame = denoise(video.read())
    prev_frame = cv2.cvtColor(denoise(video.read()), cv2.COLOR_BGR2GRAY)

    vm = VideoController(10)
    diff = None
    for frame in video.frames():
        # with utils.timeit_context('frame processing'):
        #     frame = denoise(frame)
        #     diff = cv2.absdiff(prev_frame, frame, dst=diff)
        #     ret, mask = cv2.threshold(diff, 45, 255, cv2.THRESH_BINARY)
        #     binary_mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
        #     # cn = cv2.countNonZero(binary_mask)
        #     nonzero = cv2.findNonZero(binary_mask)

        with utils.timeit_context('frame processing'):
            frame = denoise(frame)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            diff = cv2.absdiff(prev_frame, gray, dst=diff)
            ret, mask = cv2.threshold(diff, 45, 255, cv2.THRESH_BINARY)
            # cn = cv2.countNonZero(binary_mask)
            nonzero = cv2.findNonZero(mask)

        diff_wnd.imshow(diff)
        mask_wnd.imshow(mask)
        input_wnd.imshow(utils.put_frame_pos(frame.copy(), video.frame_pos()))

        prev_frame = gray

        if not vm.wait_key():
            break

    video.release()
    cv2.destroyAllWindows()
Example No. 18
    def scan(self):
        """
        Read a QR code / barcode from the camera and return its data.
        :return: list|None
        """
        with VideoCapture() as cap:
            ret, img = cap.read()

            # If the read fails, the camera may still be starting up, so wait briefly and retry
            if ret is not True:
                sleep(2)
                ret, img = cap.read()

            # If it still fails, print a message and return None
            if ret is not True:
                print('capture device is not available')
                return None

            # Use this to check whether the QR code / barcode appears in the captured image
            # cv2.imwrite('./test.jpg', img)

            data = self.decode(img)

            return data
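
self.decode is not shown here; a minimal sketch, assuming the pyzbar library is used for QR/barcode decoding:

    def decode(self, img):
        # Decode any QR codes / barcodes in the image and return their payloads
        from pyzbar import pyzbar
        results = pyzbar.decode(img)
        return [r.data.decode('utf-8') for r in results]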
Example No. 19
    @classmethod  # decorator assumed: the method uses cls as an alternate constructor
    def using_video_capture(cls, video_capture_source, is_show_gui):
        print("Using video capture")
        cap = VideoCapture(video_capture_source)
        return cls(None, cap, is_show_gui)
Example No. 20
    def __init__(self):
        QWidget.__init__(self)

        # loading ui from xml
        uic.loadUi(os.path.join(DIRPATH, 'app.ui'), self)

        # FIXME - libpng warning: iCCP: known incorrect sRGB profile
        self.setWindowIcon(QIcon("./images/robot_icon.png"))

        # keep the window fixed sized
        self.setFixedSize(self.size())

        # button event handlers
        self.btnStartCaptureForVideoAnalysis.clicked.connect(
            self.start_capture_for_video_analysis)
        self.btnStopCaptureForVideoAnalysis.clicked.connect(
            self.stop_capture_for_video_analysis)

        self.btnChooseClassifierXML.clicked.connect(
            self.choose_classifier_file)

        self.btnChooseImage.clicked.connect(self.choose_image_for_analysis)

        self.setup_tray_menu()

        # add camera ids
        for i in range(0, 11):
            self.cboxCameraIds.addItem(str(i))
            self.cboxCameraIds1.addItem(str(i))

        # setting up handlers for menubar actions
        self.actionAbout.triggered.connect(self.about)
        self.actionExit.triggered.connect(qApp.quit)
        self.actionPreferences.triggered.connect(self.show_preferences)

        # video analysis image widget
        self.img_widget_vid_analysis = ImageWidget()
        self.hlayoutVideoAnalysis.addWidget(self.img_widget_vid_analysis)

        # face training image widget
        self.img_widget_face_training = ImageWidget()
        self.hlayoutFaceTrainingImg.addWidget(self.img_widget_face_training)

        # face identification image widget
        self.img_widget_identify_face = ImageWidget()
        self.hlayoutIdentifyFace.addWidget(self.img_widget_identify_face)

        # image analysis image widget
        self.img_widget_img_analysis = ImageWidget()
        self.hlayoutImageAnalysis.addWidget(self.img_widget_img_analysis)
        img = cv2.imread("images/human.png")
        self.img_widget_img_analysis.handle_image_data(img)

        self.vid_capture = VideoCapture()
        self.vid_capture.got_image_data_from_camera.connect(
            self.process_image_data_from_camera)

        self.highlight_faces = self.chkHighlightFaces.isChecked()
        self.chkHighlightFaces.stateChanged.connect(
            self.highlight_faces_checkbox_changed)
        self.chckGrayscale.stateChanged.connect(
            self.grayscale_checkbox_changed)

        # face trainer dataset browser btn handler
        self.btnBrowseDatasetForFaceTrainer.clicked.connect(
            self.browse_dataset_for_face_trainer)
        self.btnBrowseClassifierForFaceTrainer.clicked.connect(
            self.browse_classifier_file_for_face_trainer)
        self.btnStartFaceTrainer.clicked.connect(self.start_face_trainer)

        self.btnBrowseIdentifyFace.clicked.connect(self.browse_identify_face)

        self.btnTalk.clicked.connect(self.lets_talk)

        # create and start robot
        self.robot = Robot(self.lblRobot)

        self.mouth = Mouth()

        # connect global signals to slots
        g_emitter().feed_mouth.connect(self.mouth.feed_text)
        g_emitter().set_speaking_state.connect(self.robot.set_speaking_state)
        g_emitter().set_idle_state.connect(self.robot.set_idle_state)

        self.robot.start()
        self.mouth.start()
Example No. 21
class AppWindow(QMainWindow):
    """
    Main GUI class for application
    """
    def __init__(self):
        QWidget.__init__(self)

        # loading ui from xml
        uic.loadUi(os.path.join(DIRPATH, 'app.ui'), self)

        # FIXME - libpng warning: iCCP: known incorrect sRGB profile
        self.setWindowIcon(QIcon("./images/robot_icon.png"))

        # keep the window fixed sized
        self.setFixedSize(self.size())

        # button event handlers
        self.btnStartCaptureForVideoAnalysis.clicked.connect(
            self.start_capture_for_video_analysis)
        self.btnStopCaptureForVideoAnalysis.clicked.connect(
            self.stop_capture_for_video_analysis)

        self.btnChooseClassifierXML.clicked.connect(
            self.choose_classifier_file)

        self.btnChooseImage.clicked.connect(self.choose_image_for_analysis)

        self.setup_tray_menu()

        # add camera ids
        for i in range(0, 11):
            self.cboxCameraIds.addItem(str(i))
            self.cboxCameraIds1.addItem(str(i))

        # setting up handlers for menubar actions
        self.actionAbout.triggered.connect(self.about)
        self.actionExit.triggered.connect(qApp.quit)
        self.actionPreferences.triggered.connect(self.show_preferences)

        # video analysis image widget
        self.img_widget_vid_analysis = ImageWidget()
        self.hlayoutVideoAnalysis.addWidget(self.img_widget_vid_analysis)

        # face training image widget
        self.img_widget_face_training = ImageWidget()
        self.hlayoutFaceTrainingImg.addWidget(self.img_widget_face_training)

        # face identification image widget
        self.img_widget_identify_face = ImageWidget()
        self.hlayoutIdentifyFace.addWidget(self.img_widget_identify_face)

        # image analysis image widget
        self.img_widget_img_analysis = ImageWidget()
        self.hlayoutImageAnalysis.addWidget(self.img_widget_img_analysis)
        img = cv2.imread("images/human.png")
        self.img_widget_img_analysis.handle_image_data(img)

        self.vid_capture = VideoCapture()
        self.vid_capture.got_image_data_from_camera.connect(
            self.process_image_data_from_camera)

        self.highlight_faces = self.chkHighlightFaces.isChecked()
        self.chkHighlightFaces.stateChanged.connect(
            self.highlight_faces_checkbox_changed)
        self.chckGrayscale.stateChanged.connect(
            self.grayscale_checkbox_changed)

        # face trainer dataset browser btn handler
        self.btnBrowseDatasetForFaceTrainer.clicked.connect(
            self.browse_dataset_for_face_trainer)
        self.btnBrowseClassifierForFaceTrainer.clicked.connect(
            self.browse_classifier_file_for_face_trainer)
        self.btnStartFaceTrainer.clicked.connect(self.start_face_trainer)

        self.btnBrowseIdentifyFace.clicked.connect(self.browse_identify_face)

        self.btnTalk.clicked.connect(self.lets_talk)

        # create and start robot
        self.robot = Robot(self.lblRobot)

        self.mouth = Mouth()

        # connect global signals to slots
        g_emitter().feed_mouth.connect(self.mouth.feed_text)
        g_emitter().set_speaking_state.connect(self.robot.set_speaking_state)
        g_emitter().set_idle_state.connect(self.robot.set_idle_state)

        self.robot.start()
        self.mouth.start()

    def lets_talk(self):
        text = self.teTalk.toPlainText()
        self.teTalk.setText("")
        g_emitter().emit_signal_to_feed_mouth(text)

    def browse_identify_face(self):
        fname = QFileDialog.getOpenFileName(self, 'Open file', '/home')
        self.teIdentifyFace.setText(fname[0])

        img = cv2.imread(fname[0])
        self.img_widget_identify_face.handle_image_data(img)

    def start_face_trainer(self):
        dataset_dir = self.teFaceTrainerDataset.toPlainText()
        classifier_xml = self.teFaceTrainerClassifier.toPlainText()
        log.info(
            "starting face trainer with classifier '%s' and dataset '%s'" %
            (classifier_xml, dataset_dir))

        ft = FaceTrainer(classifier_xml, dataset_dir)
        ft.processing_image.connect(self.processing_image_for_training)
        ft.face_training_finished.connect(self.face_training_finished)
        ft.start()
        self.lblFaceTrainingStatus.setText("FACE TRAINING UNDER PROGRESS")

    def face_training_finished(self):
        self.lblFaceTrainingStatus.setText("FACE TRAINING FINISHED")
        g_emitter().emit_signal_to_feed_mouth("face training finished")

    def processing_image_for_training(self, label, fname):
        log.info("processing image for training: '%s'" % label)
        self.lblFaceTrainerCurImg.setText("Learning face of: '%s' " % label)

        try:
            img = cv2.imread(fname)
            self.img_widget_face_training.handle_image_data(img)
        except Exception as exp:
            log.warning("failed while processing image '%s' while training" %
                        fname)
            log.warning("Exception: %s" % str(exp))

    def browse_dataset_for_face_trainer(self):
        dataset_dir = str(
            QFileDialog.getExistingDirectory(self,
                                             'Select directory for dataset',
                                             '/home'))
        log.info("dataset dir file: %s" % dataset_dir)
        self.teFaceTrainerDataset.setText(dataset_dir)

    def browse_classifier_file_for_face_trainer(self):
        classifier_xml = QFileDialog.getOpenFileName(self, 'Open file',
                                                     '/home')
        log.info("classifier xml file: %s" % classifier_xml[0])
        self.teFaceTrainerClassifier.setText(classifier_xml[0])

    def grayscale_checkbox_changed(self):
        fname = self.teImage.toPlainText()
        print(fname)
        img = cv2.imread(fname)
        if self.chckGrayscale.isChecked():
            # convert image to grayscale
            pil_image = Image.open(fname).convert("L")

            # convert grayscale image to numpy array
            image_array = np.array(pil_image, "uint8")

            # FIXME - code crashes here !!!
            self.img_widget_img_analysis.handle_image_data(image_array)
        else:
            self.img_widget_img_analysis.handle_image_data(img)

    def highlight_faces_checkbox_changed(self):
        if self.chkHighlightFaces.isChecked():
            print("yes")
        else:
            print("no")

    def choose_classifier_file(self):
        fname = QFileDialog.getOpenFileName(self, 'Open file', '/home')
        log.info("chose classfier xml file: %s" % fname[0])
        self.teClassifierXML.setText(fname[0])

    def choose_image_for_analysis(self):
        fname = QFileDialog.getOpenFileName(self, 'Open file', '/home')
        log.info("chose imagefile: %s, for analysis" % fname[0])
        self.teImage.setText(fname[0])

        img = cv2.imread(fname[0])
        self.img_widget_img_analysis.handle_image_data(img)

    def start_capture_for_video_analysis(self):
        log.debug("start video capture")
        self.vid_capture.start()

    def stop_capture_for_video_analysis(self):
        log.debug("start video capture")
        self.vid_capture.stop()
        self.img_widget_vid_analysis.reset()

    def detect_face_in_image_data(self, image_data):
        """
        function detects faces in image data,
        draws rectangle for faces in image data,
        and returns this updated image data with highlighted face/s
        """
        self._red = (0, 0, 255)
        self._width = 2
        self._min_size = (30, 30)

        # haarclassifiers work better in black and white
        gray_image = cv2.cvtColor(image_data, cv2.COLOR_BGR2GRAY)
        gray_image = cv2.equalizeHist(gray_image)

        # path to the Haar face classifier's xml file
        face_cascade_xml = './cascades/haarcascades_cuda/haarcascade_frontalface_default.xml'
        self.classifier = cv2.CascadeClassifier(face_cascade_xml)
        faces = self.classifier.detectMultiScale(gray_image,
                                                 scaleFactor=1.3,
                                                 minNeighbors=4,
                                                 flags=cv2.CASCADE_SCALE_IMAGE,
                                                 minSize=self._min_size)

        for (x, y, w, h) in faces:
            cv2.rectangle(image_data, (x, y), (x + w, y + h), self._red,
                          self._width)

        return image_data

    def process_image_data_from_camera(self, image_data):
        if self.chkHighlightFaces.isChecked():
            image_data = self.detect_face_in_image_data(image_data)
        self.img_widget_vid_analysis.handle_image_data(image_data)

    def about(self):
        ad = AboutDialog()
        ad.display()

    def show_preferences(self):
        print("preferences")
        pd = PrefsDialog()
        pd.display()

    def setup_tray_menu(self):

        # setting up QSystemTrayIcon
        self.tray_icon = QSystemTrayIcon(self)
        self.tray_icon.setIcon(QIcon("./images/robot_icon.png"))

        # tray actions
        show_action = QAction("Show", self)
        quit_action = QAction("Exit", self)
        hide_action = QAction("Hide", self)

        # action handlers
        show_action.triggered.connect(self.show)
        hide_action.triggered.connect(self.hide)
        quit_action.triggered.connect(qApp.quit)

        # tray menu
        tray_menu = QMenu()
        tray_menu.addAction(show_action)
        tray_menu.addAction(hide_action)
        tray_menu.addAction(quit_action)
        self.tray_icon.setContextMenu(tray_menu)
        self.tray_icon.show()

    def closeEvent(self, event):
        try:
            event.ignore()
            self.hide()
            self.tray_icon.showMessage("RoboVision",
                                       "RoboVision was minimized to Tray",
                                       QSystemTrayIcon.Information, 2000)
            self.robot.stop()
            self.robot.join()
        except Exception as exp:
            log.warning("app close exp: %s" % str(exp))

    def ok_pressed(self):
        log.debug("[AppWindow] :: ok")
        self.show_msgbox("AppWindow", "Its ok")

    def show_msgbox(self, title, text):
        """
        Function for showing error/info message box
        """
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Information)
        msg.setText(text)
        msg.setWindowTitle(title)
        msg.setStandardButtons(QMessageBox.Ok)

        retval = msg.exec_()
        print("[INFO] Value of pressed message box button:", retval)
Example No. 22
def detect():
    print('detecting...')


def addFace():
    print('adding...')


def deleteFace():
    print('deleting...')


app = QApplication(sys.argv)  # create the GUI application object
main = Main()  # create the main window UI object

from video_capture import VideoCapture
videoCapture = VideoCapture(VIDEO_WIDTH, VIDEO_HEIGHT, FACEPPFILE,
                            VIDEO_DEVICE, main)

main.show()  # show the main window

main.pushButton_exit.clicked.connect(exit)
main.identification.clicked.connect(detect)
main.pushButton_pz.clicked.connect(addFace)
main.pushButton_delete.clicked.connect(deleteFace)

while True:
    videoCapture.drawPicture()
    videoCapture.waitKey()
Example No. 23
def get_video():
    return VideoCapture(video_sources.video_6)
Example No. 24
class VideoStream:
    def __init__(self, uri, transform=None, queue_size=128, gpu_id=None, quiet=True, read_type='numpy'):
        # initialize the video capture along with the boolean
        # used to indicate if the thread should be stopped or not
        self.cap = VideoCapture(uri, gpu_id=gpu_id, quiet=quiet)
        self.stopped = False
        self.transform = transform
        self.read_type = read_type

        # initialize queue and thread
        self.Q = Queue(maxsize=queue_size)
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True

    def start(self):
        # start a thread to read frames from the file video stream
        self.thread.start()
        return self

    def update(self):
        # keep looping infinitely
        while True:
            # if the thread indicator variable is set, stop the thread
            if self.stopped: break

            # otherwise, ensure the queue has room in it
            if not self.Q.full():
                # read the next frame from the file
                frame = self.cap.read(type=self.read_type)

                # if the `grabbed` boolean is `False`, then we have
                # reached the end of the video file
                if frame is None:
                    self.stopped = True
                    
                # if there are transforms to be done, might as well
                # do them on the producer thread before handing back to
                # the consumer thread; usually the producer is so far
                # ahead of the consumer that we have time to spare.
                #
                # Python is not parallel, but the transform operations
                # are usually OpenCV-native, so they release the GIL.
                #
                # Really just trying to avoid spinning up additional
                # native threads and overheads of additional
                # producer/consumer queues since this one was generally
                # idle grabbing frames.
                if self.transform:
                    frame = self.transform(frame)

                # add the frame to the queue
                self.Q.put(frame)
            else:
                time.sleep(0.1)  # Rest for 100 ms; the queue is full
        self.cap.release()

    def read(self):
        # return next frame in the queue
        return self.Q.get()

    # Insufficient to have consumer use while(more()) which does
    # not take into account if the producer has reached end of stream.
    def running(self):
        return self.more() or not self.stopped

    def more(self):
        # return True if there are still frames in the queue. If stream is not stopped, try to wait a moment
        tries = 0
        while self.Q.qsize() == 0 and not self.stopped and tries < 5:
            time.sleep(0.1)
            tries += 1
        return self.Q.qsize() > 0

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
        # wait until stream resources are released (producer thread might still be grabbing a frame)
        self.thread.join()
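
A minimal consumer loop for the VideoStream class above; the file name is a placeholder.

stream = VideoStream('video.mp4').start()   # placeholder path
while stream.running():
    frame = stream.read()
    if frame is None:        # the producer enqueues None at end of stream
        break
    # ... process frame ...
stream.stop()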
Example No. 25
    def is_detect(self, img1=None, img2=None, img3=None):
        """
        Determine whether there is a moving object.
        :param img1: numpy.ndarray
        :param img2: numpy.ndarray
        :param img3: numpy.ndarray
        :rtype: bool
        """
        mask = self.frame_sub(img1, img2, img3)
        white_pixel = cv2.countNonZero(mask)

        return white_pixel > self.motion_threshold


if __name__ == '__main__':
    with VideoCapture() as cap:
        # The camera takes a moment to start up, so wait before the first read
        sleep(2)
        detector = MotionDetect()

        # Grab two frames for the frame-difference check, convert them to grayscale and push them
        detector.push(cv2.cvtColor(cap.read()[1], cv2.COLOR_RGB2GRAY))
        detector.push(cv2.cvtColor(cap.read()[1], cv2.COLOR_RGB2GRAY))

        take_count = 100

        print('Motion detection started')

        while take_count > 0:
            detector.push(cv2.cvtColor(cap.read()[1], cv2.COLOR_RGB2GRAY))
Example No. 26
def video_feed():   # signature assumed; the start of this snippet was truncated
    '''Call the generate function and return a Flask response

    Returns:
        [Response] -- Flask Response object
    '''
    return Response(generate(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


def generate():
    '''Get the current frame from the shared memory buffer and return it as bytes
    '''
    while True:
        obj = shared_frame.get_obj()
        frame = np.ctypeslib.as_array(obj)
        frame = frame.reshape(config.frame_size_h, config.frame_size_w, 3)
        _, buffer = cv2.imencode('.jpg', frame)
        frame = buffer.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')


if __name__ == '__main__':
    set_start_method("spawn")
    video_capture = VideoCapture()
    shared_frame = Array(ctypes.c_uint8,
                         config.frame_size_h * config.frame_size_w * 3,
                         lock=True)
    process = Process(target=video_capture.run, args=(1, shared_frame))
    process.start()
    app.run(host=config.host,
            port=config.port,
            debug=config.debug,
            threaded=config.threaded)
Example No. 27
class RealSawyerLift(object):

    def __init__(self, control_freq=20, horizon=1000, camera_height=256,grip_thresh=.25,
                 camera_width=256, use_image=False, use_proprio=False,
                 port1=None, joint_proprio=True, do_sleep=True):

        self.joint_proprio = joint_proprio
        self.ctrl_freq = control_freq
        self.t = horizon
        self.use_image = use_image
        self.use_proprio = use_proprio
        self.do_sleep = do_sleep

        assert self.use_image or self.use_proprio, 'One of use_image and use_proprio must be set'
        # self.controller = controller
        self.cam_dim = (camera_height, camera_width)

        self.control_timestep = 1. / self.ctrl_freq
        self.is_py2 = sys.version_info[0] < 3
        self.gripper_closed = 0 # 0 = open 1 = closed
        self.timestep = 0
        self.done = False
        self.has_object_obs = False
        self.data_size = 96000

        self.grip_thresh=grip_thresh
        self.sawyer_interface = None
        self.camera = None

        #np.random.seed(int(time.time()))
        self.port1 = port1 #np.random.choice(range(PORT_LOW, PORT_HIGH))
        self.port2 = self.port1 + 1
        self.debounce = 0  # make sure the gripper doesn't toggle too quickly

        self.config = make_config('RealSawyerDemoServerConfig')
        self.config.infer_settings()

        self.controller = self.config.controller.mode

        if self.is_py2:
            self.bridge = PythonBridge(self.port1, self.port2, self.data_size)
            self._setup_robot()
            self._recv_loop()
        else:
            self.bridge = PythonBridge(self.port2, self.port1, self.data_size)
            self._setup_camera()

    def signal_handler(self, signal, frame):
        """
        Exits on ctrl+C
        """
        if self.is_py2:
            self.sawyer_interface.open_gripper()
            if self.controller=='opspace':
                self.osc_controller.reset_flag.publish(True)

    def send(self, data):
        #self.close()
        #exit()
        self.bridge.send(data)

    def recv(self):
        data = self.bridge.recieve()
        action = array('f')
        action.fromstring(data)
        data = np.array(action)

        # print('Recieved', data)
        if data[0] == RESET and data[3] == RESET and data[-1] == RESET:
            self.reset()
        elif data[0] == SUCCESS and data[3] == SUCCESS and data[-1] == SUCCESS:
            return True
        elif data[0] == GET_POSE and data[3] == GET_POSE and data[-1] == GET_POSE:
            pose = self.sawyer_interface.ee_pose
            pose = self.sawyer_interface.pose_endpoint_transform(pose, des_endpoint='right_hand',
                                                                 curr_endpoint='right_gripper_tip')
            pose.append(self.sawyer_interface._gripper.get_position())
            self.send(array('f', pose))
        elif data[0] == GET_PROPRIO and data[3] == GET_PROPRIO and data[-1] == GET_PROPRIO:
            joint_pos = np.array(self.sawyer_interface.q)
            joint_vel = np.array(self.sawyer_interface.dq)

            gripper_pos = np.array(self.sawyer_interface._gripper.get_position())

            ee_pose = self.sawyer_interface.ee_pose
            ee_pose = np.array(self.sawyer_interface.pose_endpoint_transform(ee_pose, des_endpoint='right_hand',
                                                                    curr_endpoint='right_gripper_tip'))

            print(joint_pos.shape, joint_vel.shape, gripper_pos.shape, ee_pose.shape)
            pose = np.concatenate([
                np.sin(joint_pos), np.cos(joint_pos), joint_vel, [gripper_pos], [-gripper_pos], ee_pose
            ])
            print(pose)
            self.send(array('f', pose))
        elif data[0] == CLOSE and data[3] == CLOSE and data[-1] == CLOSE:
            self.close()
            exit()
        else:
            self._apply_action(data)

    def _recv_loop(self):
        while True:
            self.recv()

    def _setup_camera(self):
        self.camera = VideoCapture(0, self.cam_dim)

    def _setup_robot(self):
        self.osc_robot = make_robot(self.config.robot.type, config=self.config)
        self.osc_controller = make_controller(self.config.controller.type, robot=self.osc_robot, config=self.config)
        self.osc_controller.reset()
        self.osc_controller.sync_state()

        self.sawyer_interface = self.osc_robot.robot_arm

        signal.signal(signal.SIGINT, self.signal_handler)  # Handles ctrl+C

    def reset(self):
        self.timestep = 0

        if self.is_py2:
            self.sawyer_interface.open_gripper()
            self.sawyer_interface.reset()
            time.sleep(1)
            # Successful Reset
            self.send(array('f', np.array([SUCCESS] * self.dof).tolist()))
            return
        else:
            # Request a reset
            self.send(array('f', np.array([RESET] * self.dof).tolist()))
            _ = self.recv()

        return self._get_observation()

    #def _process_image(self, img):
    #    h, w, _ = img.shape
    #    new_w = int( float(w) / float(h) * self.cam_dim[0])
    #    new_shape = (new_w, self.cam_dim[0])
    #
    #    resized = cv2.resize(img, new_shape, cv2.INTER_AREA)
    #
    #    center = new_w // 2
    #    left = center - self.cam_dim[1] // 2 # assume for now that this is multiple of 2
    #    right = center + self.cam_dim[1] // 2
    #
    #    img = resized[:,left:right,:]
    #    img = np.array([img])
    #    img = np.transpose(img, (0, 3, 1, 2))
    #    return img

    def _get_image(self):
        ret, frame = self.camera.read()
        if not ret:
            raise RuntimeError('camera read failed')
        return frame

    def _get_proprio(self):
        if self.joint_proprio:
            self.send(array('f', np.array([GET_PROPRIO]*self.dof).tolist()))
        else:
            self.send(array('f', np.array([GET_POSE]*self.dof).tolist()))
        data = self.bridge.recieve()
        proprio = array('f')
        proprio.fromstring(data)
        return np.array(proprio)

    def _get_observation(self):
        start = time.time()
        di = {}

        if self.use_image:
            img = self._get_image()
            di['image'] = img
        if self.use_proprio:
            di['proprio'] = self._get_proprio()
        # print('Observation retrieval time: {}'.format(time.time()-start))
        return di

    def _pre_action(self, action):

        if self.controller == 'velocity':
            # TODO: there is linear interpolation being done between the velocity limits
            #       in sim, do we have something similar to that on the real robot?
            action = np.clip(action, -0.3, 0.3)

        return action

    def _apply_action(self, action):
        if self.is_py2:
            if self.controller == 'velocity':
                self.sawyer_interface.dq = action[:-1]

            elif self.controller == 'opspace':
                self.osc_controller.send_action(action[:-1])

            if self.debounce:
                self.debounce-=1
            else:
                if self.gripper_closed:
                    if abs(action[-1]) < 1-self.grip_thresh:
                        self.sawyer_interface.open_gripper()
                        self.gripper_closed=0
                        self.debounce=5
                        print('open gripper')
                else:
                    if abs(action[-1])>self.grip_thresh:
                        self.sawyer_interface.close_gripper()
                        self.gripper_closed=1
                        self.debounce=5
                        print('close gripper')

        else:
            # send to py2
            self.send(bytes(array('f', action.tolist())))

    @property
    def action_spec(self):
        low = np.ones(self.dof) * -1.
        high = np.ones(self.dof) * 1.
        return low, high

    @property
    def dof(self):
        return 8

    def step(self, action):

        if self.done:
            raise RuntimeError('Env is terminated')

        start = time.time()

        self.timestep += 1
        action = self._pre_action(action)

        self._apply_action(action)

        if self.do_sleep:
            end_time = start + self.control_timestep
            while time.time() < end_time:
                pass

        return self._get_observation(), 0., False, {}

    def render(self):
        cv2.imshow('Observation', self._get_image())
        cv2.waitKey(1)

    def observation_spec(self):
        observation = self._get_observation()
        return observation

    def close(self):
        if self.is_py2:
            self.sawyer_interface.open_gripper()
            self.sawyer_interface.reset()
        else:
            self.camera.release()
            self.send(array('f', np.array([CLOSE] * self.dof).tolist()))
Example No. 28
class Controller(object):

    def __init__(self, env, video_device):
        try:
            self.API_ENDPOINT = env['ApiEndpoint']
            self.FACE_AREA_THRESHOLD = env['FaceAreaThreshold']
            self.NAME_TTL_SEC = env['NameTtlSec']
            self.FACE_SIMILARITY_THRESHOLD = env['FaceSimilarityThreshold']
            self.COGNITO_USERPOOL_ID = env['CognitoUserPoolId']
            self.COGNITO_USERPOOL_CLIENT_ID = env['CognitoUserPoolClientId']
            self.REGION = env['Region']
        except KeyError:
            print('Invalid config file')
            raise

        self.recent_name_list = []
        self.registered_name_set = set()
        self.video_capture = VideoCapture(env, video_device)
        self.detector = Detector(env)
        self.viewer = Viewer(env)

    def _update_name_list(self):
        limit_time = datetime.datetime.now() - datetime.timedelta(seconds=self.NAME_TTL_SEC)
        for d in self.recent_name_list[:]:
            if d.get('timestamp') < limit_time:
                self.recent_name_list.remove(d)

    def _sign(self, key, msg):
        return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()

    def _get_signature_key(self, key, date_stamp, region_name, service_name):
        date = self._sign(('AWS4' + key).encode('utf-8'), date_stamp)
        region = self._sign(date, region_name)
        service = self._sign(region, service_name)
        signing = self._sign(service, 'aws4_request')
        return signing

    def _get_id_token_by_cognito(self, username, password):
        client = boto3.client('cognito-idp', self.REGION)
        response = client.initiate_auth(
            ClientId=self.COGNITO_USERPOOL_CLIENT_ID,
            AuthFlow='USER_PASSWORD_AUTH',
            AuthParameters={
                'USERNAME': username,
                'PASSWORD': password
            }
        )
        return response['AuthenticationResult']['IdToken']
                         

    def run(self):
        
        # input username and password
        username = input("Enter username: "******"key 'q' is pressed")
Example No. 29
from flask import Flask, request
from video_capture import VideoCapture
import threading
import os

app = Flask(__name__)
vc = VideoCapture()

def remove(f):
    try:
        os.remove(f)
    except OSError:
        # ignore files that do not exist or cannot be removed
        pass
    
@app.route('/', methods=['GET'])
def index():
    return 'Use /start or /stop!'

@app.route('/configure', methods=['GET'])
def configure():
    if request.args.get('url'):
        vc.url = request.args.get('url')
    if request.args.get('video_source'):
        try:
            # numeric device index (e.g. webcam 0) ...
            vc.configure(int(request.args.get('video_source')))
        except ValueError:
            # ... otherwise treat the value as a stream URL / file path
            vc.configure(request.args.get('video_source'))

    return 'configured'

@app.route('/start', methods=['GET'])
Example No. 30
from flask import Flask, render_template, Response
from video_capture import VideoCapture
import config

app = Flask(__name__)

@app.route('/')
def index():
    '''Render the main html page'''
    return render_template('index.html', frame_size_w=config.frame_size_w, frame_size_h=config.frame_size_h)

@app.route('/video_feed')
def video_feed():
    '''Call the generate function and return a Flask response
    
    Returns:
        [Response] -- Flask Response object
    '''
    return Response(generate(), mimetype='multipart/x-mixed-replace; boundary=frame')

def generate():
    '''Get the frame from the capture thread and return it as bytes
    '''
    while True:
        frame = video_capture.frame
        yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

if __name__ == '__main__':
    video_capture = VideoCapture()   
    video_capture.start()
    app.run(host=config.host, port=config.port, debug=config.debug, threaded=config.threaded)
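
One way to consume the /video_feed stream above from Python, assuming the server is running locally (the URL is a placeholder) and the requests package is available:

import requests

def iter_mjpeg_frames(url='http://localhost:5000/video_feed'):
    # Yield raw JPEG bytes from a multipart/x-mixed-replace stream
    resp = requests.get(url, stream=True)
    buf = b''
    for chunk in resp.iter_content(chunk_size=4096):
        buf += chunk
        start = buf.find(b'\xff\xd8')   # JPEG start-of-image marker
        end = buf.find(b'\xff\xd9')     # JPEG end-of-image marker
        if start != -1 and end != -1 and end > start:
            yield buf[start:end + 2]
            buf = buf[end + 2:]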