def makeChain(self):
    """Build the filterchains: live source -> fork -> (decoder, recorder)."""
    slot_tag = str(self.slot)
    # Forks allow several downstream filter chains per stream.
    self.fork = core.ForkFrameFilterN("fork_" + slot_tag)
    self.fork_yuv = core.ForkFrameFilterN("fork_yuv_" + slot_tag)

    # Frame fifo sizing for the decoder thread.
    self.framefifo_ctx = core.FrameFifoContext()
    self.framefifo_ctx.n_basic = self.n_basic
    self.framefifo_ctx.n_setup = self.n_setup
    self.framefifo_ctx.n_signal = self.n_signal
    self.framefifo_ctx.flush_when_full = True

    # Decoder: writes decoded frames into self.fork_yuv.
    self.avthread = core.AVThread(
        "avthread_" + self.idst,
        self.fork_yuv,
        self.framefifo_ctx,
    )
    self.avthread.setAffinity(self.affinity)

    # Initial connections: LiveThread writes into self.fork ...
    self.createLiveContext()
    # ... self.fork feeds the decoder ...
    self.connect_to_stream(
        "live_decode_" + slot_tag,
        self.avthread.getFrameFilter(),
    )
    # ... and self.fork also feeds the ValkkaFSWriterThread.
    self.connect_to_stream(
        "recorder_" + slot_tag,
        self.valkkafsmanager.getFrameFilter(),
    )
    self.valkkafsmanager.setInput(self.id_rec, self.slot)
def makeChain(self):
    """Build the filterchains: recorded stream -> fork -> blocking decoder."""
    slot_tag = str(self.slot)
    self.fork = core.ForkFrameFilterN("fork_" + slot_tag)
    self.fork_yuv = core.ForkFrameFilterN("fork_yuv_" + slot_tag)

    # Frame fifo sizing for the decoder thread.
    self.framefifo_ctx = core.FrameFifoContext()
    self.framefifo_ctx.n_basic = self.n_basic
    self.framefifo_ctx.n_setup = self.n_setup
    self.framefifo_ctx.n_signal = self.n_signal
    self.framefifo_ctx.flush_when_full = True

    # Decoder: writes decoded frames into self.fork_yuv.
    self.avthread = core.AVThread(
        "avthread_" + self.idst,
        self.fork_yuv,
        self.framefifo_ctx,
    )
    self.avthread.setAffinity(self.affinity)

    self.info = core.InfoFrameFilter("debug")

    # Recorded stream: self.fork feeds the decoder's blocking input.
    self.connect_to_stream(
        "rec_decode_" + slot_tag,
        self.avthread.getBlockingFrameFilter(),
    )
    # ValkkaFS manager writes the recorded stream into self.fork.
    self.ctx = self.valkkafsmanager.setOutput(self.id_rec, self.slot, self.fork)
def make_decode_branch(self):
    """Create the decoding branch: main fork -> AVThread -> decode fork."""
    tag = str(self.slot)
    self.fork_filter_decode = core.ForkFrameFilterN("fork_filter_decode_" + tag)

    self.framefifo_ctx = core.FrameFifoContext()
    self.framefifo_ctx.n_basic = self.n_basic
    self.framefifo_ctx.n_setup = self.n_setup
    self.framefifo_ctx.n_signal = self.n_signal
    self.framefifo_ctx.flush_when_full = self.flush_when_full

    self.avthread = core.AVThread(
        "avthread_" + tag, self.fork_filter_decode, self.framefifo_ctx)

    # Affinity pinning overrides the thread-count setting.
    if self.affinity > -1:
        self.avthread.setAffinity(self.affinity)
    elif self.number_of_threads > 1:
        # two by default
        self.avthread.setNumberOfThreads(self.number_of_threads)

    # Decoder's input FrameFilter, hooked to the main fork.
    self.av_in_filter = self.avthread.getFrameFilter()
    self.fork_filter_main.connect("decoding_" + tag, self.av_in_filter)
def makeChain(self):
    """Build the filter chain: main (decode) branch plus swscale branch."""
    # --- main branch ---
    # Downstream FrameFilter chains can be attached to a ForkFrameFilterN
    # after it has been instantiated.
    self.fork_filter = core.ForkFrameFilterN(
        "av_fork_at_slot_" + str(self.slot))

    self.framefifo_ctx = core.FrameFifoContext()
    self.framefifo_ctx.n_basic = self.n_basic
    self.framefifo_ctx.n_setup = self.n_setup
    self.framefifo_ctx.n_signal = self.n_signal
    self.framefifo_ctx.flush_when_full = self.flush_when_full

    self.avthread = core.AVThread(
        "avthread_" + self.idst, self.fork_filter, self.framefifo_ctx)
    if self.verbose:
        print(self.pre, "binding AVThread to core", int(self.affinity))
    self.avthread.setAffinity(self.affinity)
    # Decoder's input FrameFilter.
    self.av_in_filter = self.avthread.getFrameFilter()

    # --- swscale branch ---
    self.sws_fork_filter = core.ForkFrameFilterN(
        "sws_fork_at_slot_" + str(self.slot))
    self.sws_filter = core.SwScaleFrameFilter(
        "sws_filter", self.width, self.height, self.sws_fork_filter)
    # Rate-limit frames entering the swscale branch.
    self.interval_filter = core.TimeIntervalFrameFilter(
        "interval_filter", self.shmem_image_interval, self.sws_filter)
def makeChain(self):
    """Build the filter chain: incoming stream forked to decoder and outgoing LiveThread."""
    # branch 1: decode for display
    self.gl_in_filter = self.openglthread.getInput()  # OpenGLThread's input FrameFilter

    self.framefifo_ctx = core.FrameFifoContext()
    self.framefifo_ctx.n_basic = self.n_basic
    self.framefifo_ctx.n_setup = self.n_setup
    self.framefifo_ctx.n_signal = self.n_signal
    self.framefifo_ctx.flush_when_full = self.flush_when_full

    self.avthread = core.AVThread(
        "avthread_" + self.idst, self.gl_in_filter, self.framefifo_ctx)
    self.avthread.setAffinity(self.affinity)
    self.av_in_filter = self.avthread.getFrameFilter()  # AVThread's input FrameFilter

    # branch 2: pass-through to the outgoing LiveThread
    self.live_out_filter = self.outgoing_livethread.core.getFrameFilter()

    # fork feeding both branches; this one is used by incoming_livethread
    self.fork = core.ForkFrameFilter(
        "fork", self.av_in_filter, self.live_out_filter)
def makeChain(self):
    """Build the filter chain: AVThread decodes into the OpenGLThread input.

    Configures the decoder's frame fifo, then either pins the decoder to a
    core (affinity) or gives it several decoding threads -- the code warns
    that the two options are mutually exclusive.
    """
    # get input FrameFilter from OpenGLThread
    self.gl_in_filter = self.openglthread.getInput()

    self.framefifo_ctx = core.FrameFifoContext()
    self.framefifo_ctx.n_basic = self.n_basic
    self.framefifo_ctx.n_setup = self.n_setup
    self.framefifo_ctx.n_signal = self.n_signal
    self.framefifo_ctx.flush_when_full = self.flush_when_full

    self.avthread = core.AVThread(
        "avthread_" + self.idst,
        self.gl_in_filter,
        self.framefifo_ctx)

    if self.affinity > -1 and self.n_threads > 1:
        print("WARNING: can't use affinity with multiple threads")
    self.avthread.setAffinity(self.affinity)
    # BUGFIX: the thread count was previously applied only when
    # self.affinity > -1, i.e. exactly the combination warned about above,
    # and self.n_threads was ignored otherwise.  Apply it whenever multiple
    # threads were actually requested.
    if self.n_threads > 1:
        self.avthread.setNumberOfThreads(self.n_threads)

    # get input FrameFilter from AVThread
    self.av_in_filter = self.avthread.getFrameFilter()
def makeChain(self):
    """Build the filter chain: decoder -> fork -> (OpenGL display, RGB shmem)."""
    if self.shmem_name is None:
        self.shmem_name = "shmemff" + self.idst
    n_buf = self.shmem_ringbuffer_size
    width = self.shmem_image_dimensions[0]
    height = self.shmem_image_dimensions[1]

    # branch 1: OpenGLThread's input FrameFilter
    self.gl_in_filter = self.openglthread.getInput()

    # branch 2: rate-limit -> downscale -> shared-memory server
    self.shmem_filter = core.RGBShmemFrameFilter(
        self.shmem_name, n_buf, width, height)  # shmem id, cells, width, height
    self.sws_filter = core.SwScaleFrameFilter(
        "sws_filter" + self.idst, width, height, self.shmem_filter)
    self.interval_filter = core.TimeIntervalFrameFilter(
        "interval_filter" + self.idst, self.shmem_image_interval, self.sws_filter)

    # fork: writes to branches 1 and 2
    self.fork_filter = core.ForkFrameFilter(
        "fork_filter" + self.idst, self.gl_in_filter, self.interval_filter)

    # main branch: the decoder writes into the fork
    self.framefifo_ctx = core.FrameFifoContext()
    self.framefifo_ctx.n_basic = self.n_basic
    self.framefifo_ctx.n_setup = self.n_setup
    self.framefifo_ctx.n_signal = self.n_signal
    self.framefifo_ctx.flush_when_full = self.flush_when_full
    self.avthread = core.AVThread(
        "avthread_" + self.idst, self.fork_filter, self.framefifo_ctx)
    self.avthread.setAffinity(self.affinity)
    # get input FrameFilter from AVThread
    self.av_in_filter = self.avthread.getFrameFilter()
def makeChain(self):
    """Build the filter chain: AVThread decodes straight into the OpenGLThread."""
    # input FrameFilter of the OpenGLThread
    self.gl_in_filter = self.openglthread.getInput()

    self.framefifo_ctx = core.FrameFifoContext()
    self.framefifo_ctx.n_basic = self.n_basic
    self.framefifo_ctx.n_setup = self.n_setup
    self.framefifo_ctx.n_signal = self.n_signal
    self.framefifo_ctx.flush_when_full = self.flush_when_full

    self.avthread = core.AVThread(
        "avthread_" + self.idst, self.gl_in_filter, self.framefifo_ctx)
    self.avthread.setAffinity(self.affinity)
    # input FrameFilter of the AVThread
    self.av_in_filter = self.avthread.getFrameFilter()
def makeChain(self):
    """Build the filter chain: AVThread decodes into a terminal fork."""
    # FrameFilter chains can be attached to a ForkFrameFilterN after it
    # has been instantiated.
    self.fork_filter = core.ForkFrameFilterN(
        "av_fork_at_slot_" + str(self.slot))

    self.framefifo_ctx = core.FrameFifoContext()
    self.framefifo_ctx.n_basic = self.n_basic
    self.framefifo_ctx.n_setup = self.n_setup
    self.framefifo_ctx.n_signal = self.n_signal
    self.framefifo_ctx.flush_when_full = self.flush_when_full

    self.avthread = core.AVThread(
        "avthread_" + self.idst, self.fork_filter, self.framefifo_ctx)
    self.avthread.setAffinity(self.affinity)
    # input FrameFilter of the AVThread
    self.av_in_filter = self.avthread.getFrameFilter()
def make_decode_branch(self):
    """Create the decoding branch: main fork -> AVThread -> decode fork."""
    tag = str(self.slot)
    self.fork_filter_decode = core.ForkFrameFilterN("fork_filter_decode_" + tag)

    self.framefifo_ctx = core.FrameFifoContext()
    self.framefifo_ctx.n_basic = self.n_basic
    self.framefifo_ctx.n_setup = self.n_setup
    self.framefifo_ctx.n_signal = self.n_signal
    self.framefifo_ctx.flush_when_full = self.flush_when_full

    self.avthread = core.AVThread(
        "avthread_" + tag, self.fork_filter_decode, self.framefifo_ctx)
    self.avthread.setAffinity(self.affinity)

    # decoder's input FrameFilter, hooked to the main fork
    self.av_in_filter = self.avthread.getFrameFilter()
    self.fork_filter_main.connect("decoding_" + tag, self.av_in_filter)
def makeChain(self):
    """Build the filter chain: decoder -> fork -> (OpenGL display, movement detection)."""
    # branch 1: OpenGLThread's input FrameFilter
    self.gl_in_filter = self.openglthread.getInput()

    # branch 2: movement detection, then downscale into the thread-safe filter
    self.sws_filter = core.SwScaleFrameFilter(
        "sws_filter" + self.idst,
        self.image_dimensions[0],
        self.image_dimensions[1],
        self.threadsafe_filter)
    # MovementFrameFilter(char const *, long, float, long, FrameFilter *)
    self.movement_filter = core.MovementFrameFilter(
        "movement_" + self.idst,
        self.movement_interval,
        self.movement_treshold,
        self.movement_duration,
        self.sws_filter)
    if self.movement_callback is not None:
        self.movement_filter.setCallback(self.movement_callback)

    # main branch: decoder writes into the fork feeding both branches
    self.fork_filter = core.ForkFrameFilter(
        "fork_filter" + self.idst, self.gl_in_filter, self.movement_filter)

    self.framefifo_ctx = core.FrameFifoContext()
    self.framefifo_ctx.n_basic = self.n_basic
    self.framefifo_ctx.n_setup = self.n_setup
    self.framefifo_ctx.n_signal = self.n_signal
    self.framefifo_ctx.flush_when_full = self.flush_when_full
    self.avthread = core.AVThread(
        "avthread_" + self.idst, self.fork_filter, self.framefifo_ctx)
    self.avthread.setAffinity(self.affinity)
    # input FrameFilter of the AVThread
    self.av_in_filter = self.avthread.getFrameFilter()
- FileCacheThread .. per slot, two substream SetupFrame(s) .. or what? - Video jerks a bit .. is this because the play edge is too close to the block edge and it runs empty before new frames arrive? """ setValkkaLogLevel(loglevel_debug) def cb(mstime): print("mstime callback", mstime) # create OpenGLThread (for drawing video) and AVThread (for decoding) glthread = OpenGLThread(name="gl_thread") ctx = core.FrameFifoContext() avthread = core.AVThread( "avthread", glthread.getInput(), ctx) av_in_filter = avthread.getFrameFilter() avthread.startCall() avthread.decodingOnCall() # create an X-window window_id = glthread.createWindow() # map frames with slot 1 to that window glthread.newRenderGroup(window_id) context_id = glthread.newRenderContext(1, window_id, 0) valkkafs = ValkkaFS.loadFromDirectory(dirname="/home/sampsa/tmp/testvalkkafs") # manager = ValkkaFSManager(valkkafs, cb)
def makeChain(self):
    """Build the filter chain: display, RGB-shmem and fragMP4 branches fed by one fork.

    Branch 1 decodes for on-screen display (OpenGLThread), branch 2
    downscales RGB frames into shared memory for an OpenCV/Tensorflow
    process, branch 3 muxes the h264 stream into fragMP4 chunks in shared
    memory (upload to Azure blob storage happens elsewhere, if activated).
    """
    setValkkaLogLevel(loglevel_silent)
    # if self.shmem_name is None:
    #     self.shmem_name = "shmemff" + self.idst
    print(self.pre, self.shmem_name)
    n_buf = self.shmem_ringbuffer_size

    # Branch 1 : displaying stream on the dashboard
    # get input FrameFilter from OpenGLThread
    self.gl_in_filter = self.openglthread.getInput()
    # decoder for display
    # NOTE(review): both AVThreads below are given the same name
    # ("avthread_" + self.idst); consider unique names for debug output.
    self.avthread1_1 = core.AVThread(
        "avthread_" + self.idst,
        self.gl_in_filter,
    )
    self.avthread1_1.setAffinity(self.affinity)
    # get input framefilter from avthread
    self.av_in_filter1_1 = self.avthread1_1.getFrameFilter()

    # Branch 2 : saving frames to shared memory for the openCV/Tensorflow process
    print(self.pre, "using shmem name ", self.shmem_name)  # debug
    print(self.shmem_name)  # debug
    try:
        self.shmem_filter = core.RGBShmemFrameFilter(
            self.shmem_name,
            n_buf,
            self.shmem_image_dimensions[0],
            self.shmem_image_dimensions[1]
        )
        if self.shmem_filter:
            print("Shared mem created ")
    except Exception as e:
        # BUGFIX: was '"..." + e', which raises TypeError (str + Exception);
        # convert the exception to str before concatenating.
        # NOTE(review): on failure self.shmem_filter stays unset and the
        # SwScaleFrameFilter below raises AttributeError -- confirm whether
        # this best-effort handling is intended.
        print(" There is a problem in allocating memory to RGBShmemFrameFilter : \n" + str(e))
    self.sws_filter = core.SwScaleFrameFilter(
        "sws_filter" + self.idst,
        self.shmem_image_dimensions[0],
        self.shmem_image_dimensions[1],
        self.shmem_filter
    )
    if self.sws_filter:
        print("Sws_filter created !")
    self.interval_filter = core.TimeIntervalFrameFilter(
        "interval_filter" + self.idst,
        self.shmem_image_interval,
        self.sws_filter
    )
    if self.interval_filter:
        print("interval_filter created ")
    self.avthread2_1 = core.AVThread(
        "avthread_" + self.idst,
        self.interval_filter,
    )
    self.avthread2_1.setAffinity(self.affinity)
    # get input framefilter from avthread
    self.av_in_filter2_1 = self.avthread2_1.getFrameFilter()

    # Branch 3 : converting the h264 stream to fragMP4 chunks
    n_buf_fragmp4 = self.frag_shmem_buffers
    nb_cells = 1024 * 1024 * 3
    try:
        self.fshmem_filter = core.FragMP4ShmemFrameFilter(
            self.frag_shmem_name,
            n_buf_fragmp4,
            nb_cells
        )
    except Exception as e:
        print("Failed to create fragmp4 shared memory server : \n", e)
    if self.fshmem_filter:
        print("fshmem filter created")
    self.mux_filter = core.FragMP4MuxFrameFilter(
        "fragmp4muxer",
        self.fshmem_filter
    )
    if self.mux_filter:
        print("mux filter created")
    # self.mux_filter.activate()

    # Fork : writes to branches 1, 2 and 3
    self.fork_filter = core.ForkFrameFilter3(
        "fork_filter" + self.idst,
        self.av_in_filter1_1,
        self.av_in_filter2_1,
        self.mux_filter
    )

    # Main branch
    self.framefifo_ctx = core.FrameFifoContext()
    self.framefifo_ctx.n_basic = self.n_basic
    self.framefifo_ctx.n_setup = self.n_setup
    self.framefifo_ctx.n_signal = self.n_signal
    self.framefifo_ctx.flush_when_full = self.flush_when_full
import valkka
from valkka import core

print("Valkka loaded ok")
print(" Version ", valkka.core.__version__)
print(" Core loaded from ", core.__file__)

# Smoke test: instantiate some core objects.
print()
print(" Testing Valkka classes")
live = core.LiveThread("live")
# inp = core.FrameFifo("fifo")        # no longer in the API
# ff = core.FifoFrameFilter("fifo", inp)
out = core.DummyFrameFilter("dummy")
av = core.AVThread("av", out)
gl = core.OpenGLThread("gl")
av_in = av.getFrameFilter()
gl_in = gl.getFrameFilter()

# Connection context, built field by field ...
ctx = core.LiveConnectionContext()
ctx.slot = 1
ctx.connection_type = core.LiveConnectionType_rtsp
ctx.address = "rtsp://*****:*****@192.168.0.157"
# ... and via the constructor in one go.
ctx2 = core.LiveConnectionContext(
    core.LiveConnectionType_rtsp,
    "rtsp://*****:*****@192.168.0.157",
    1,
    out,
)
print(" Valkka classes ok")
print()
# this is modified automatically by setver.bash - don't touch!
def __call__(self, livethread=None, openglthread=None):
    """Register the running live & opengl threads, construct the filterchain, start threads.

    :param livethread: a started core.LiveThread (required)
    :param openglthread: a started OpenGLThread, or None to skip the display branch
    :raises ValueError: if livethread is not given
    """
    # BUGFIX: was 'assert (livethread is not None)' -- asserts are stripped
    # under 'python -O', so validate explicitly instead.
    if livethread is None:
        raise ValueError("livethread is required")
    self.livethread = livethread
    self.openglthread = openglthread

    # Construct the filter graph from end to beginning.

    # Main branch
    self.main_fork = core.ForkFrameFilterN("main_fork" + str(self.slot))
    # connect livethread to the main branch: the stream is written to main_fork
    self.live_ctx = core.LiveConnectionContext(
        core.LiveConnectionType_rtsp,
        self.rtsp_address,
        self.slot,
        self.main_fork)
    # Additional livethread streaming-context parameters:
    # 1: for NATs and streaming over the internet, use tcp streaming
    self.live_ctx.request_tcp = True
    # 2: if buffering is insufficient or timestamps are wrong, use:
    # self.live_ctx.time_correction = core.TimeCorrectionType_smart
    # 3: reconnect automatically every 10 seconds if the camera goes offline
    self.live_ctx.mstimeout = 10000
    self.livethread.registerStreamCall(self.live_ctx)

    # Branch B : mux branch
    self.fmp4_shmem = core.FragMP4ShmemFrameFilter(
        self.fmp4_shmem_name,
        self.fmp4_shmem_buffers,
        self.fmp4_shmem_cellsize)
    print(">", self.fmp4_sync_event)
    self.fmp4_shmem.useFd(self.fmp4_sync_event)
    self.fmp4_muxer = core.FragMP4MuxFrameFilter("mp4_muxer", self.fmp4_shmem)
    # self.fmp4_muxer.activate()
    # The muxer must be connected from the very beginning so that it receives
    # the setup frames, which are sent only at the start of the streaming
    # process.
    self.main_fork.connect("fragmp4_terminal" + str(self.slot), self.fmp4_muxer)

    # Branch A : decoding branch
    self.decode_fork = core.ForkFrameFilterN("decode_fork_" + str(self.slot))
    self.avthread = core.AVThread(
        "avthread_" + str(self.slot),
        self.decode_fork)  # avthread feeds decode_fork
    # main branch -> avthread -> decode_fork
    self.avthread_in_filter = self.avthread.getFrameFilter()
    self.main_fork.connect("decoder_" + str(self.slot), self.avthread_in_filter)

    # Branch A / sub-branch A.1 : RGB shared memory
    self.rgb_shmem_filter = core.RGBShmemFrameFilter(
        self.rgb_shmem_name, self.rgb_shmem_buffers, self.width, self.height)
    self.rgb_shmem_filter.useFd(self.rgb_sync_event)
    self.sws_filter = core.SwScaleFrameFilter(
        "sws_filter", self.width, self.height, self.rgb_shmem_filter)
    self.interval_filter = core.TimeIntervalFrameFilter(
        "interval_filter", self.image_interval, self.sws_filter)
    self.decode_fork.connect("rgb_shmem_terminal" + str(self.slot), self.interval_filter)

    # Branch A / sub-branch A.2 : OpenGL display branch
    if self.openglthread is not None:
        # feed decoded frames into opengl
        self.opengl_input_filter = self.openglthread.getFrameFilter()
        self.decode_fork.connect("gl_terminal" + str(self.slot), self.opengl_input_filter)
        # create an X window and map this slot's stream into it
        self.window_id = self.openglthread.createWindow()
        self.openglthread.newRenderGroupCall(self.window_id)
        self.context_id = self.openglthread.newRenderContextCall(
            self.slot, self.window_id, 0)

    # start streaming and decoding
    self.livethread.playStreamCall(self.live_ctx)
    self.avthread.startCall()
    self.avthread.decodingOnCall()