Code example #1
0
File: manager3.py  Project: jlerasmus/valkka-core
    def makeChain(self):
        """Create the filter chain for this slot.

        Two branches are constructed:

        * main branch: ForkFrameFilterN -> AVThread (decoder); the decoder's
          input FrameFilter is exposed as ``self.av_in_filter``
        * swscale branch: TimeIntervalFrameFilter -> SwScaleFrameFilter ->
          ForkFrameFilterN for rescaled frames

        NOTE(review): the swscale branch is only constructed here, not
        connected to ``self.fork_filter`` — presumably that is done elsewhere
        via ``ForkFrameFilterN.connect``; TODO confirm.
        """
        # *** main_branch ***
        self.fork_filter = core.ForkFrameFilterN("av_fork_at_slot_" + str(
            self.slot))  # FrameFilter chains can be attached to ForkFrameFilterN after it's been instantiated

        # frame-fifo sizing / overflow behaviour for the decoder thread
        self.framefifo_ctx = core.FrameFifoContext()
        self.framefifo_ctx.n_basic = self.n_basic
        self.framefifo_ctx.n_setup = self.n_setup
        self.framefifo_ctx.n_signal = self.n_signal
        self.framefifo_ctx.flush_when_full = self.flush_when_full

        # decoder thread: writes decoded frames to self.fork_filter
        self.avthread = core.AVThread(
            "avthread_" + self.idst,
            self.fork_filter,
            self.framefifo_ctx)
        if (self.verbose): print(self.pre,"binding AVThread to core", int(self.affinity))
        # pin the decoder thread to a cpu core
        self.avthread.setAffinity(self.affinity)
        # get input FrameFilter from AVThread
        self.av_in_filter = self.avthread.getFrameFilter()

        # *** swscale_branch ***
        # terminal fork for rescaled frames; consumers attach to this
        self.sws_fork_filter = core.ForkFrameFilterN("sws_fork_at_slot_" + str(self.slot))
        # rescale to (self.width, self.height) before forking
        self.sws_filter      = core.SwScaleFrameFilter("sws_filter", self.width, self.height, self.sws_fork_filter)
        # rate-limit frames entering the swscale branch to one per
        # self.shmem_image_interval (presumably milliseconds — TODO confirm)
        self.interval_filter = core.TimeIntervalFrameFilter("interval_filter", self.shmem_image_interval, self.sws_filter)
Code example #2
0
    def makeChain(self):
        """Create the filter chain.

        Frames flow: AVThread -> fork_filter, which forks into

        * branch 1: ``gl_in_filter`` (input of the OpenGLThread, for display)
        * branch 2: interval -> swscale -> RGB shared-memory server

        The decoder's input FrameFilter is exposed as ``self.av_in_filter``.
        """
        # default shared-memory segment name derived from the id string
        if (self.shmem_name is None):
            self.shmem_name = "shmemff" + self.idst
        # print(self.pre,self.shmem_name)

        # self.n_bytes =self.shmem_image_dimensions[0]*self.shmem_image_dimensions[1]*3
        n_buf = self.shmem_ringbuffer_size  # number of cells in the shmem ring buffer

        # branch 1
        # get input FrameFilter from OpenGLThread
        self.gl_in_filter = self.openglthread.getInput()

        # branch 2
        # print(self.pre,"using shmem name",self.shmem_name)
        # print(self.shmem_name)
        # shared-memory server: makes RGB bitmaps available to other processes
        self.shmem_filter = core.RGBShmemFrameFilter(
            self.shmem_name,
            n_buf,
            self.shmem_image_dimensions[0],
            self.shmem_image_dimensions[1])  # shmem id, cells, width, height
        # self.shmem_filter    =core.InfoFrameFilter        ("info"+self.idst)
        # # debug

        # rescale frames to the shmem image dimensions before writing them
        self.sws_filter = core.SwScaleFrameFilter(
            "sws_filter" + self.idst,
            self.shmem_image_dimensions[0],
            self.shmem_image_dimensions[1],
            self.shmem_filter)
        # rate-limit the shmem branch to one frame per shmem_image_interval
        self.interval_filter = core.TimeIntervalFrameFilter(
            "interval_filter" + self.idst, self.shmem_image_interval, self.sws_filter)

        # fork: writes to branches 1 and 2
        # self.fork_filter     =core.ForkFrameFilter
        # ("fork_filter"+self.idst,self.gl_in_filter,self.sws_filter) # FIX
        self.fork_filter = core.ForkFrameFilter(
            "fork_filter" + self.idst,
            self.gl_in_filter,
            self.interval_filter)
        # self.fork_filter     =core.ForkFrameFilter         ("fork_filter"+self.idst,self.gl_in_filter,None)
        # self.fork_filter=self.gl_in_filter # debugging

        # main branch
        # frame-fifo sizing / overflow behaviour for the decoder thread
        self.framefifo_ctx = core.FrameFifoContext()
        self.framefifo_ctx.n_basic = self.n_basic
        self.framefifo_ctx.n_setup = self.n_setup
        self.framefifo_ctx.n_signal = self.n_signal
        self.framefifo_ctx.flush_when_full = self.flush_when_full

        self.avthread = core.AVThread(
            "avthread_" + self.idst,
            self.fork_filter,
            self.framefifo_ctx)  # AVThread writes to self.fork_filter
        # pin the decoder thread to a cpu core
        self.avthread.setAffinity(self.affinity)
        # get input FrameFilter from AVThread
        self.av_in_filter = self.avthread.getFrameFilter()
Code example #3
0
File: multifork.py  Project: elsampsa/valkka-live
 def make_analysis_branch(self):
     """Connect only if movement detector is required:
     
     - Recording on movement
     - Analysis on movement

     Chain (frames flow left to right):
     movement_filter -> sws_gate -> sws_filter -> sws_fork_filter
     """
     # terminal fork for analysis consumers; they attach to this filter
     self.sws_fork_filter = core.ForkFrameFilterN("sws_fork_" + str(self.slot))
     # rescale to (self.width, self.height) before forking
     self.sws_filter = core.SwScaleFrameFilter("sws_scale_" + str(self.slot), self.width, self.height, self.sws_fork_filter)
     # gate: allows the analysis branch to be switched on/off at runtime
     self.sws_gate = core.GateFrameFilter("sws_gate_" + str(self.slot), self.sws_filter)
     # movement detector: passes frames downstream while movement is detected
     # (note: shmem_image_interval is used here instead of movement_interval —
     # the commented-out line below suggests this was a deliberate change)
     self.movement_filter = core.MovementFrameFilter("movement_" + str(self.slot), 
             # self.movement_interval,
             self.shmem_image_interval,
             self.movement_treshold,
             self.movement_duration,
             self.sws_gate
             )
Code example #4
0
File: multifork.py  Project: elsampsa/valkka-live
    def make_qt_branch(self):
        """Connect only if bitmaps needed at the Qt side

        Chain: main decode fork -> qt_gate -> qt_interval -> qt_sws_filter
        -> qt_fork_filter (consumers attach to the final fork).
        """
        tag = str(self.slot)

        # terminal fork: Qt-side consumers attach to this filter
        self.qt_fork_filter = core.ForkFrameFilterN("qt_fork_" + tag)

        # rescale to (self.width, self.height) before forking
        self.qt_sws_filter = core.SwScaleFrameFilter(
            "qt_sws_scale_" + tag, self.width, self.height, self.qt_fork_filter)

        # rate-limit the Qt branch: at most one frame per 500 ms
        self.qt_interval = core.TimeIntervalFrameFilter(
            "qt_interval_" + tag, 500, self.qt_sws_filter)

        # gate: allows this branch to be switched on/off at runtime
        self.qt_gate = core.GateFrameFilter("qt_gate_" + tag, self.qt_interval)

        # connect to main:
        self.fork_filter_decode.connect("qt_branch_" + tag, self.qt_gate)
Code example #5
0
File: chains.py  Project: ajithkumbla/valkka-examples
    def makeChain(self):
        """Create the filter chain.

        Frames flow: AVThread -> fork_filter, which forks into

        * branch 1: ``gl_in_filter`` (input of the OpenGLThread, for display)
        * branch 2: movement detector -> swscale -> ``self.threadsafe_filter``
          (presumably a caller-provided thread-safe sink — TODO confirm)

        The decoder's input FrameFilter is exposed as ``self.av_in_filter``.
        """
        # branch 1
        # get input FrameFilter from OpenGLThread
        self.gl_in_filter = self.openglthread.getInput()

        # branch 2
        # rescale frames to self.image_dimensions before handing them on
        self.sws_filter = core.SwScaleFrameFilter("sws_filter" + self.idst,
                                                  self.image_dimensions[0],
                                                  self.image_dimensions[1],
                                                  self.threadsafe_filter)

        # MovementFrameFilter::MovementFrameFilter(char const *,long,float,long,FrameFilter *)
        # passes frames downstream only while movement is detected
        self.movement_filter = core.MovementFrameFilter(
            "movement_" + self.idst, self.movement_interval,
            self.movement_treshold, self.movement_duration, self.sws_filter)

        # optional python callback invoked by the movement filter
        if self.movement_callback is not None:
            self.movement_filter.setCallback(self.movement_callback)

        # main branch
        self.fork_filter = core.ForkFrameFilter("fork_filter" + self.idst,
                                                self.gl_in_filter,
                                                self.movement_filter)

        # frame-fifo sizing / overflow behaviour for the decoder thread
        self.framefifo_ctx = core.FrameFifoContext()
        self.framefifo_ctx.n_basic = self.n_basic
        self.framefifo_ctx.n_setup = self.n_setup
        self.framefifo_ctx.n_signal = self.n_signal
        self.framefifo_ctx.flush_when_full = self.flush_when_full

        self.avthread = core.AVThread(
            "avthread_" + self.idst, self.fork_filter,
            self.framefifo_ctx)  # AVThread writes to self.fork_filter
        # pin the decoder thread to a cpu core
        self.avthread.setAffinity(self.affinity)
        # get input FrameFilter from AVThread
        self.av_in_filter = self.avthread.getFrameFilter()
Code example #6
0
    def makeChain(self):
        """Create the filter chain.

        Three branches, all fed by ``self.fork_filter`` (ForkFrameFilter3):

        * branch 1: decode (avthread1_1) -> OpenGLThread input, for display
        * branch 2: decode (avthread2_1) -> interval -> swscale -> RGB shmem
          server, for an external OpenCV/Tensorflow process
        * branch 3: FragMP4 muxer -> FragMP4 shmem server (mux is created but
          not activated here — see the commented-out ``activate()`` call)
        """
        setValkkaLogLevel(loglevel_silent)
        # if (self.shmem_name is None):
        #     self.shmem_name = "shmemff" + self.idst

        print(self.pre, self.shmem_name)

        # self.n_bytes = self.shmem_image_dimensions[0] * self.shmem_image_dimensions[1]*3
        n_buf = self.shmem_ringbuffer_size  # number of cells in the RGB shmem ring buffer

        # Branch 1 : Displaying Stream to the dashboard
        # get input FrameFilter from OpenGLThread
        self.gl_in_filter = self.openglthread.getInput()

        # Decoding for displaying
        self.avthread1_1 = core.AVThread(
            "avthread_" + self.idst,
            self.gl_in_filter,
        )
        self.avthread1_1.setAffinity(self.affinity)

        # get input framefilter from avthread
        self.av_in_filter1_1 = self.avthread1_1.getFrameFilter()

        # Branch 2 : Saving frames to shared memory for openCV/Tensorflow process
        # debug prints
        print(self.pre, "using shmem name ", self.shmem_name)
        print(self.shmem_name)
        # pre-initialize so the attribute exists even if construction fails
        # (otherwise the SwScaleFrameFilter below raises AttributeError)
        self.shmem_filter = None
        try:
            self.shmem_filter = core.RGBShmemFrameFilter(
                self.shmem_name,
                n_buf,
                self.shmem_image_dimensions[0],
                self.shmem_image_dimensions[1]
            )
            if self.shmem_filter:
                print("Shared mem created ")
        except Exception as e:
            # BUGFIX: was `"..." + e`, which raises TypeError (cannot
            # concatenate str and Exception); pass e as a separate argument
            print(" There is a problem in allocating memory to RGBShmemFrameFilter : \n", e)

        # self.shmem_filter = core.InfoFrameFilter("info"+ selft.idst) ## debugging
        # rescale frames to the shmem image dimensions before writing them
        self.sws_filter = core.SwScaleFrameFilter(
            "sws_filter" + self.idst,
            self.shmem_image_dimensions[0],
            self.shmem_image_dimensions[1],
            self.shmem_filter
        )
        if self.sws_filter:
            print("Sws_filter created !")
        # rate-limit the shmem branch to one frame per shmem_image_interval
        self.interval_filter = core.TimeIntervalFrameFilter(
            "interval_filter" + self.idst, self.shmem_image_interval, self.sws_filter
        )
        if self.interval_filter:
            print("interval_filter created ")
        # NOTE(review): this thread gets the same name as avthread1_1
        # ("avthread_" + self.idst) — presumably harmless, but confirm that
        # valkka does not require unique thread names
        self.avthread2_1 = core.AVThread(
            "avthread_" + self.idst,
            self.interval_filter,
        )
        self.avthread2_1.setAffinity(self.affinity)

        # get input framefilter from avthread
        self.av_in_filter2_1 = self.avthread2_1.getFrameFilter()

        # Branch 3 : Converting Stream to MP4 | Upload to Azure blob storage if activated
        # For the moment this branch receives the h264 stream and converts it
        # to fragmented-mp4 chunks
        n_buf_fragmp4 = self.frag_shmem_buffers

        nb_cells = 1024 * 1024 * 3  # shmem cell size: 3 MB per cell
        # pre-initialize so the attribute exists even if construction fails
        self.fshmem_filter = None
        try:
            self.fshmem_filter = core.FragMP4ShmemFrameFilter(
                self.frag_shmem_name,
                n_buf_fragmp4,
                nb_cells
            )
        except Exception as e:
            print("Failed to create fragmp4 shared memory server : \n", e)
        if (self.fshmem_filter):
            print("fshmem filter created")
        self.mux_filter = core.FragMP4MuxFrameFilter(
            "fragmp4muxer",
            self.fshmem_filter
        )
        if (self.mux_filter):
            print("mux filter created")
        # self.mux_filter.activate()

        # Fork : Writes to branches 1, 2 and 3
        self.fork_filter = core.ForkFrameFilter3(
            "fork_filter" + self.idst,
            self.av_in_filter1_1,
            self.av_in_filter2_1,
            self.mux_filter
        )

        # Main branch
        # frame-fifo sizing / overflow behaviour
        self.framefifo_ctx = core.FrameFifoContext()
        self.framefifo_ctx.n_basic = self.n_basic
        self.framefifo_ctx.n_setup = self.n_setup
        self.framefifo_ctx.n_signal = self.n_signal
        self.framefifo_ctx.flush_when_full = self.flush_when_full
Code example #7
0
    def __call__(self, livethread=None, openglthread=None):
        """
        Register running live & openglthreads, construct filterchain, start threads

        :param livethread:   running core.LiveThread instance (required)
        :param openglthread: running OpenGLThread instance, or None to skip
                             the display branch

        Graph (frames flow left to right)::

            livethread -> main_fork -> fmp4_muxer -> fmp4_shmem      (Branch B)
                                    -> avthread -> decode_fork
                                         -> interval -> sws -> rgb_shmem  (A.1)
                                         -> openglthread input            (A.2)
        """
        assert (livethread is not None)
        self.livethread = livethread
        self.openglthread = openglthread

        # Construct Filter graph from end to beginning
        # Main branch
        self.main_fork = core.ForkFrameFilterN("main_fork" + str(self.slot))

        # connect livethread to main branch
        self.live_ctx = core.LiveConnectionContext(
            core.LiveConnectionType_rtsp, self.rtsp_address, self.slot,
            self.main_fork)  # stream rights to main_fork

        # Some aditional parameters you can give to livethread streaming context
        ## 1 : for NATs and Streaming over the internet, use tcp streaming
        self.live_ctx.request_tcp = True
        ## 2 : if you don't have enough buffering or timestamps are wrong, use this:
        # self.live_ctx.time_correction = core.TimeCorrectionType_smart
        ## 3 : enable automatic reconnection every 10 seconds if camera is offline
        self.live_ctx.mstimeout = 10000

        self.livethread.registerStreamCall(self.live_ctx)

        # Branch B : Mux Branch
        # shared-memory server for fragmented-mp4 chunks
        self.fmp4_shmem = core.FragMP4ShmemFrameFilter(
            self.fmp4_shmem_name, self.fmp4_shmem_buffers,
            self.fmp4_shmem_cellsize)
        print(">", self.fmp4_sync_event)
        # use an eventfd so the client can wait on new frames
        self.fmp4_shmem.useFd(self.fmp4_sync_event)
        self.fmp4_muxer = core.FragMP4MuxFrameFilter("mp4_muxer",
                                                     self.fmp4_shmem)
        # self.fmp4_muxer.activate()
        # connect main branch to mux branch
        self.main_fork.connect("fragmp4_terminal" + str(self.slot),
                               self.fmp4_muxer)
        # muxer must be connected from the very beginning so that it receives setupframes, sent only in the beginning of streaming process

        # Branch A : Decoding Branch
        self.decode_fork = core.ForkFrameFilterN("decode_fork_" +
                                                 str(self.slot))
        self.avthread = core.AVThread(
            "avthread_" + str(self.slot),
            self.decode_fork)  # Here avthread feeds decode_fork
        # connect main branch to avthread to decode_fork
        self.avthread_in_filter = self.avthread.getFrameFilter()
        self.main_fork.connect("decoder_" + str(self.slot),
                               self.avthread_in_filter)

        # Branch A : Sub_Branch_A.1 : RGB shared memory
        # shared-memory server for decoded RGB bitmaps
        self.rgb_shmem_filter = core.RGBShmemFrameFilter(
            self.rgb_shmem_name, self.rgb_shmem_buffers, self.width,
            self.height)
        self.rgb_shmem_filter.useFd(self.rgb_sync_event)
        # rescale to (self.width, self.height) before writing to shmem
        self.sws_filter = core.SwScaleFrameFilter("sws_filter", self.width,
                                                  self.height,
                                                  self.rgb_shmem_filter)
        # rate-limit the RGB branch to one frame per self.image_interval
        self.interval_filter = core.TimeIntervalFrameFilter(
            "interval_filter", self.image_interval, self.sws_filter)
        self.decode_fork.connect("rgb_shmem_terminal" + str(self.slot),
                                 self.interval_filter)

        # Branch A : Sub_Branch_A.2 : OpenGl branch Displaying
        if self.openglthread is not None:
            # connect decode frames in opengl
            self.opengl_input_filter = self.openglthread.getFrameFilter()
            self.decode_fork.connect("gl_terminal" + str(self.slot),
                                     self.opengl_input_filter)
            # Create X window
            #
            # win_id = self.openglthread.createWindow(show=False)
            # frame = QtFrame(self.widget, win_id)
            # self.lay.addWidget(frame.widget, 0, 0)
            #
            # token = self.openglthread.connect(slot=self.slot, window_id=win_id)
            # if token == 0:
            #     print("mapping failled  ! ")
            # else:
            #     print("mapping done ! ")
            self.window_id = self.openglthread.createWindow()
            self.openglthread.newRenderGroupCall(self.window_id)

            # map this slot's stream into the newly created window
            self.context_id = self.openglthread.newRenderContextCall(
                self.slot, self.window_id, 0)

        # everything is wired — start streaming and decoding
        self.livethread.playStreamCall(self.live_ctx)
        self.avthread.startCall()
        self.avthread.decodingOnCall()