Example #1
0
  def __init__(self, pardic):
    """Discover the X screens and start one OpenGLThread per GPU.

    :param pardic: parameter dictionary (frame-stack sizes, buffering time,
                   thread affinities)
    """
    self.pardic = pardic
    self.true_screens = []    # filled by findXScreens()
    self.openglthreads = []   # one OpenGLThread per true screen / GPU
    self.findXScreens()

    # self.true_screens=[self.true_screens[0]]

    for gpu_index, _screen in enumerate(self.true_screens):
      # each GPU is addressed through its own X connection ":0.N"
      display = ":0." + str(gpu_index)
      # x_connection=":0.1"
      # x_connection=":1.0" # nopes

      print(pre, "GPUHandler: starting OpenGLThread with", display)

      # starts frame presenting services on this GPU
      gl_thread = OpenGLThread(
        name="gpu_" + str(gpu_index),
        # reserve stacks of YUV video frames for various resolutions
        n_720p=self.pardic["n_720p"],
        n_1080p=self.pardic["n_1080p"],
        n_1440p=self.pardic["n_1440p"],
        n_4K=self.pardic["n_4K"],
        verbose=False,
        msbuftime=self.pardic["msbuftime"],
        affinity=self.pardic["gl affinity"],
        x_connection=display
        )

      print(pre, "GPUHandler: OpenGLThread started")

      self.openglthreads.append(gl_thread)

    if self.openglthreads[0].hadVsync():
      w = QtWidgets.QMessageBox.warning(None, "VBLANK WARNING", "Syncing to vertical refresh enabled\n THIS WILL DESTROY YOUR FRAMERATE\n Disable it with 'export vblank_mode=0' for nvidia proprietary drivers, use 'export __GL_SYNC_TO_VBLANK=0'")
Example #2
0
    def openValkka(self):
        """Start the valkka threads, the analyzer process, one shmem filterchain
        and a Qt widget for showing the decoded video.

        NOTE(review): order is significant here — the watcher thread and the
        machine-vision process are started before any valkka threads are spawned.
        """
        self.thread = QValkkaThread() # the thread that's watching the mvision_processes
        self.thread.start()
        
        self.mvision_process.start()
        self.thread.addProcess(self.mvision_process)
        
        # """
        self.livethread = LiveThread(         # starts live stream services (using live555)
            name="live_thread",
            verbose=False
        )

        self.filethread = FileThread(
            name="file_thread",
            verbose=False
        )

        self.openglthread = OpenGLThread(     # starts frame presenting services
            name="mythread",
            n_720p=10,
            n_1080p=10,
            n_1440p=10,
            n_4K=10,
            verbose=False,
            msbuftime=100,
            affinity=-1
        )

        # this filterchain creates a shared memory server
        self.chain = ShmemFilterchain1(       # decoding and branching the stream happens here
            openglthread = self.openglthread,
            slot = 1,
            shmem_name = self.shmem_name,
            shmem_image_dimensions = self.shmem_image_dimensions,
            shmem_image_interval = self.shmem_image_interval,
            shmem_ringbuffer_size = self.shmem_ringbuffer_size
        )

        shmem_name, n_buffer, shmem_image_dimensions = self.chain.getShmemPars()
        # plain Qt widget used as the video drawing surface; its native window id
        # is handed to OpenGLThread below
        self.video = QtWidgets.QWidget(self.video_area)
        self.win_id = int(self.video.winId())

        self.video_lay.addWidget(self.video, 0, 0)
        self.token = self.openglthread.connect(slot = 1, window_id = self.win_id)

        self.chain.decodingOn()  # tell the decoding thread to start its job

        # attach the analyzer process to the shared-memory ring buffer served
        # by the filterchain
        self.mvision_process.activate(
            n_buffer                = self.shmem_ringbuffer_size, 
            image_dimensions        = self.shmem_image_dimensions, 
            shmem_name              = self.shmem_name  
        )
Example #3
0
    def __init__(self, **kwargs):
        """Validate kwargs, discover the X screens and start one OpenGLThread
        per GPU.
        """
        # auxiliary string for debugging output
        self.pre = self.__class__.__name__ + " : "
        # check kwargs against parameter_defs; attach accepted parameters to
        # this object as attributes
        parameterInitCheck(GPUHandler.parameter_defs, kwargs, self)
        self.kwargs = kwargs
        self.true_screens = []    # list of QtCore.QScreen
        self.openglthreads = []   # list of OpenGLThread instances
        self.findXScreens()

        # self.true_screens=[self.true_screens[0]]

        for gpu_index, _screen in enumerate(self.true_screens):
            # each GPU is addressed through its own X connection ":0.N"
            display = ":0." + str(gpu_index)

            print(pre, "GPUHandler: starting OpenGLThread with", display)

            # pick the OpenGL thread affinity from the cpu scheme, if one was given
            if self.cpu_scheme:
                affinity = self.cpu_scheme.getOpenGL()
            else:
                affinity = -1

            gl_thread = OpenGLThread(
                name="gpu_" + str(gpu_index),
                # reserve stacks of YUV video frames for various resolutions
                n_720p=self.n_720p,
                n_1080p=self.n_1080p,
                n_1440p=self.n_1440p,
                n_4K=self.n_4K,
                verbose=False,
                msbuftime=self.msbuftime,
                affinity=affinity,
                x_connection=display)

            print(pre, "GPUHandler: OpenGLThread started")

            self.openglthreads.append(gl_thread)
  def openValkka(self):
    """Start live/OpenGL threads and build one BasicFilterchain per address,
    replicating each stream into several windows.

    Widgets are created *before* OpenGLThread starts reserving frames; window
    ids are collected first and consumed later via an iterator.
    """
    self.livethread=LiveThread(         # starts live stream services (using live555)
      name   ="live_thread",
      # verbose=True,
      verbose=False,
      affinity=self.pardic["live affinity"]
    )
    
    # create widgets before starting OpenGLThread and reserving frames
    cw=0 # widget / window index
    self.win_ids=[]
    for address in self.addresses:
      for cc in range(0,self.pardic["replicate"]):
        fr =TestWidget0(self.w)
        win_id =int(fr.winId())
        nrow=self.pardic["videos per row"]
        print(pre,"setupUi: layout index, address : ",cw//nrow,cw%nrow,address)
        self.lay.addWidget(fr,cw//nrow,cw%nrow)
        self.videoframes.append(fr)
        self.win_ids.append(win_id)
        cw+=1
         
    # window ids are consumed below, one per (address, replica) pair
    win_iter =iter(self.win_ids)
            
    self.openglthread=OpenGLThread(     # starts frame presenting services
      name    ="mythread",
      n_720p   =self.pardic["n_720p"],   # reserve stacks of YUV video frames for various resolutions
      n_1080p  =self.pardic["n_1080p"],
      n_1440p  =self.pardic["n_1440p"],
      n_4K     =self.pardic["n_4K"],
      # naudio  =self.pardic["naudio"], # obsolete
      # verbose =True,
      verbose =False,
      msbuftime=self.pardic["msbuftime"],
      affinity=self.pardic["gl affinity"]
      )

    
    if (self.openglthread.hadVsync()):
      w=QtWidgets.QMessageBox.warning(self,"VBLANK WARNING","Syncing to vertical refresh enabled\n THIS WILL DESTROY YOUR FRAMERATE\n Disable it with 'export vblank_mode=0' for nvidia proprietary drivers, use 'export __GL_SYNC_TO_VBLANK=0'")

    # NOTE(review): tokens is a local — the connect tokens are not kept after
    # this method returns; verify whether they are needed for disconnecting
    tokens     =[]
    self.chains=[]
    
    a =self.pardic["dec affinity start"]  # decoder affinity, cycled over a range
    cw=0 # widget / window index
    cs=1 # slot / stream count
    
    for address in self.addresses:
      # now livethread and openglthread are running
      if (a>self.pardic["dec affinity stop"]): a=self.pardic["dec affinity start"]
      print(pre,"openValkka: setting decoder thread on processor",a)

      chain=BasicFilterchain(       # decoding and branching the stream happens here
        livethread  =self.livethread, 
        openglthread=self.openglthread,
        address     =address,
        slot        =cs,
        affinity    =a,
        # verbose     =True
        verbose     =False,
        msreconnect =10000,
        
        # flush_when_full =True
        flush_when_full =False,
        
        # time_correction   =TimeCorrectionType_dummy,  # Timestamp correction type: TimeCorrectionType_none, TimeCorrectionType_dummy, or TimeCorrectionType_smart (default)
        # time_correction   =TimeCorrectionType_smart,
        # # by default, no need to specify
        
        recv_buffer_size  =0,                        # Operating system socket ringbuffer size in bytes # 0 means default
        # recv_buffer_size  =1024*800,   # 800 KB
        
        reordering_mstime =0                           # Reordering buffer time for Live555 packets in MILLIseconds # 0 means default
        # reordering_mstime =300                         
        )
  
      self.chains.append(chain) # important .. otherwise chain will go out of context and get garbage collected ..
      
      for cc in range(0,self.pardic["replicate"]):
        token  =self.openglthread.connect(slot=cs,window_id=next(win_iter)) # present frames with slot number cs at window win_id
        tokens.append(token)
        cw+=1
      
      cs+=1
      chain.decodingOn() # tell the decoding thread to start its job
      a+=1
    def openValkka(self):
        """Start two live threads (receive + multicast re-send) and the OpenGL
        thread, then build one MulticastFilterchain per address and a video
        window for each.
        """
        # setValkkaLogLevel(loglevel_debug)
        core.setLiveOutPacketBuffermaxSize(95000)  # whoa
        # check this out:
        # http://lists.live555.com/pipermail/live-devel/2013-April/016803.html

        self.livethread = LiveThread(  # starts live stream services (using live555)
            name="live_thread",
            # verbose=True,
            verbose=False,
            affinity=self.pardic["live affinity"])

        self.livethread2 = LiveThread(  # second live thread for sending multicast streams
            name="live_thread2",
            # verbose=True,
            verbose=False,
            affinity=self.pardic["live2 affinity"])

        self.openglthread = OpenGLThread(  # starts frame presenting services
            name="mythread",
            n_720p=self.pardic[
                "n_720p"],  # reserve stacks of YUV video frames for various resolutions
            n_1080p=self.pardic["n_1080p"],
            n_1440p=self.pardic["n_1440p"],
            n_4K=self.pardic["n_4K"],
            # naudio  =self.pardic["naudio"], # obsolete
            # verbose =True,
            verbose=False,
            msbuftime=self.pardic["msbuftime"],
            affinity=self.pardic["gl affinity"])

        if (self.openglthread.hadVsync()):
            w = QtWidgets.QMessageBox.warning(
                self, "VBLANK WARNING",
                "Syncing to vertical refresh enabled\n THIS WILL DESTROY YOUR FRAMERATE\n Disable it with 'export vblank_mode=0' for nvidia proprietary drivers, use 'export __GL_SYNC_TO_VBLANK=0'"
            )

        tokens = []
        self.chains = []

        a = self.pardic["dec affinity start"]
        mport = self.pardic["mcast_start_port"]  # each chain gets its own port block (mport += 4 below)

        cw = 0  # widget / window index
        cs = 1  # slot / stream count

        for address in self.addresses:
            # now livethread and openglthread are running
            if (a > self.pardic["dec affinity stop"]):
                a = self.pardic["dec affinity start"]
            print(pre, "openValkka: setting decoder thread on processor", a)

            # NOTE(review): mcast_address is not defined in this method —
            # presumably a module-level global; verify before use
            chain = MulticastFilterchain(  # decoding and branching the stream happens here
                incoming_livethread=self.livethread,
                outgoing_livethread=self.livethread2,
                openglthread=self.openglthread,
                address=address,
                multicast_address=mcast_address,
                multicast_port=mport,
                slot=cs,
                affinity=a,
                # verbose     =True
                verbose=False,
                msreconnect=10000)

            self.chains.append(
                chain
            )  # important .. otherwise chain will go out of context and get garbage collected ..

            # replicate=self.pardic["replicate"]
            replicate = 1

            for cc in range(0, replicate):
                if ("no_qt" in self.pardic):
                    # create our own x-windowses
                    win_id = self.openglthread.createWindow(show=True)
                else:

                    # *** Choose one of the following sections ***

                    # (1) Let Valkka create the windows/widget # use this: we get a window with correct parametrization
                    # win_id =self.openglthread.createWindow(show=False)
                    # fr     =getForeignWidget(self.w, win_id)

                    if (valkka_xwin == False):
                        # (2) Let Qt create the widget
                        fr = TestWidget0(self.w)
                        win_id = int(fr.winId())
                    else:
                        # """
                        # (3) Again, let Valkka create the window, but put on top a translucent widget (that catches mouse gestures)
                        win_id = self.openglthread.createWindow(show=False)
                        widget_pair = WidgetPair(self.w, win_id, TestWidget0)
                        fr = widget_pair.getWidget()
                        self.widget_pairs.append(widget_pair)
                        # """

                    nrow = self.pardic["videos per row"]
                    print(pre, "setupUi: layout index, address : ", cw // nrow,
                          cw % nrow, address)
                    self.lay.addWidget(fr, cw // nrow, cw % nrow)

                    # print(pre,"setupUi: layout index, address : ",cw//4,cw%4,address)
                    # self.lay.addWidget(fr,cw//4,cw%4)

                    self.videoframes.append(fr)

                token = self.openglthread.connect(
                    slot=cs, window_id=win_id
                )  # present frames with slot number cs at window win_id
                tokens.append(token)
                cw += 1

            cs += 1  # TODO: crash when repeating the same slot number ..?

            chain.decodingOn()  # tell the decoding thread to start its job
            a += 1
            mport += 4
  def openValkka(self):
    """Start the valkka threads, one shmem filterchain and a movement-detector
    multiprocess, then wire the detector's signals to the GUI.
    """
    self.livethread=LiveThread(         # starts live stream services (using live555)
      name   ="live_thread",
      verbose=False
    )

    self.filethread=FileThread(
      name  ="file_thread",
      verbose=False
    )

    self.openglthread=OpenGLThread(     # starts frame presenting services
      name    ="mythread",
      n_720p   =10,
      n_1080p  =10,
      n_1440p  =10,
      n_4K     =10,
      verbose =False,
      msbuftime=100,
      affinity=-1
      )
    
    if (self.openglthread.hadVsync()):
      w=QtWidgets.QMessageBox.warning(self,"VBLANK WARNING","Syncing to vertical refresh enabled\n THIS WILL DESTROY YOUR FRAMERATE\n Disable it with 'export vblank_mode=0' for nvidia proprietary drivers, use 'export __GL_SYNC_TO_VBLANK=0'")
    
    cc=1  # slot number / shmem identifier suffix
    
    self.chain=ShmemFilterchain1(       # decoding and branching the stream happens here
      openglthread=self.openglthread,
      slot        =cc,
      # this filterchain creates a shared memory server
      shmem_name             ="test_studio_file_"+str(cc),
      shmem_image_dimensions =(1920//4,1080//4),  # Images passed over shmem are quarter of the full-hd reso
      shmem_image_interval   =1000,               # YUV => RGB interpolation to the small size is done each 1000 milliseconds and passed on to the shmem ringbuffer
      shmem_ringbuffer_size  =10                  # Size of the shmem ringbuffer
      )
    
    # pick up the actual shmem parameters from the chain and hand them to the
    # detector process so both sides agree on the ring buffer
    shmem_name, n_buffer, shmem_image_dimensions =self.chain.getShmemPars()    
    # print(pre,"shmem_name, n_buffer, n_bytes",shmem_name,n_buffer,n_bytes)
    
    self.process=QValkkaMovementDetectorProcess("process_"+str(cc),shmem_name=shmem_name, n_buffer=n_buffer, image_dimensions=shmem_image_dimensions)
    
    self.process.signals.start_move.connect(self.set_moving_slot)
    self.process.signals.stop_move. connect(self.set_still_slot)
    
    if (valkka_xwin):
      # (1) Let OpenGLThread create the window
      self.win_id      =self.openglthread.createWindow(show=False)
      self.widget_pair =WidgetPair(self.video_area,self.win_id,TestWidget0)
      self.video       =self.widget_pair.getWidget()
    else:
      # (2) Let Qt create the window
      self.video     =QtWidgets.QWidget(self.video_area)
      self.win_id    =int(self.video.winId())
    
    self.video_lay.addWidget(self.video,0,0)
    self.token =self.openglthread.connect(slot=cc,window_id=self.win_id)
    
    self.chain.decodingOn() # tell the decoding thread to start its job
    
    # finally, give the multiprocesses to a qthread that's reading their message pipe
    self.thread =QValkkaThread(processes=[self.process])
    def openValkka(self):
        """Fork analyzer multiprocesses, start the valkka threads, then build
        one ShmemFilterchain + video frame per address.

        Order matters: the multiprocesses are forked *before* any threads are
        spawned (mixing fork with live threads is unsafe), then the live/OpenGL
        threads start, and finally the per-address filterchains are connected.
        """
        # some constant values
        # Images passed over shmem are quarter of the full-hd reso
        shmem_image_dimensions = (1920 // 4, 1080 // 4)
        # YUV => RGB interpolation to the small size is done each 1000
        # milliseconds and passed on to the shmem ringbuffer
        shmem_image_interval = 1000
        shmem_ringbuffer_size = 10

        # the very first thing: create & start multiprocesses
        cs = 1
        self.processes = []
        for address in self.addresses:
            shmem_name = "test_studio_" + str(cs)
            process = QValkkaMovementDetectorProcess(
                "process_" + str(cs),
                shmem_name=shmem_name,
                n_buffer=shmem_ringbuffer_size,
                image_dimensions=shmem_image_dimensions)
            self.processes.append(process)
            # BUGFIX: advance the counter so that each process gets a unique
            # name and shmem identifier.  The filterchain for slot cs below
            # serves "test_studio_<cs>" and the identifiers must match
            # pairwise; without this increment every process attached to
            # "test_studio_1".
            cs += 1

        print(self.processes)

        # Give the multiprocesses to a qthread that's reading their message
        # pipe
        self.thread = QValkkaThread(processes=self.processes)

        # starts the multiprocesses
        self.startProcesses()
        # ..so, forks have been done.  Now we can spawn threads

        self.livethread = LiveThread(  # starts live stream services (using live555)
            name="live_thread",
            verbose=False,
            affinity=self.pardic["live affinity"])

        self.openglthread = OpenGLThread(  # starts frame presenting services
            name="mythread",
            # reserve stacks of YUV video frames for various resolutions
            n_720p=self.pardic["n_720p"],
            n_1080p=self.pardic["n_1080p"],
            n_1440p=self.pardic["n_1440p"],
            n_4K=self.pardic["n_4K"],
            # naudio  =self.pardic["naudio"], # obsolete
            verbose=False,
            msbuftime=self.pardic["msbuftime"],
            affinity=self.pardic["gl affinity"])

        if (self.openglthread.hadVsync()):
            w = QtWidgets.QMessageBox.warning(
                self, "VBLANK WARNING",
                "Syncing to vertical refresh enabled\n THIS WILL DESTROY YOUR FRAMERATE\n Disable it with 'export vblank_mode=0' for nvidia proprietary drivers, use 'export __GL_SYNC_TO_VBLANK=0'"
            )

        tokens = []
        self.chains = []
        self.frames = []
        cs = 1  # slot / stream count (restarts from 1 for the chains)
        cc = 0  # index into self.processes
        a = self.pardic["dec affinity start"]

        for address in self.addresses:
            # now livethread and openglthread are running
            if (a > self.pardic["dec affinity stop"]):
                a = self.pardic["dec affinity start"]
            print(pre, "openValkka: setting decoder thread on processor", a)

            # this filterchain creates a shared memory server
            # identifies shared memory buffer must be same as in the
            # multiprocess
            chain = ShmemFilterchain(  # decoding and branching the stream happens here
                livethread=self.livethread,
                openglthread=self.openglthread,
                address=address,
                slot=cs,
                affinity=a,
                shmem_name="test_studio_" + str(cs),
                shmem_image_dimensions=shmem_image_dimensions,
                shmem_image_interval=shmem_image_interval,
                shmem_ringbuffer_size=shmem_ringbuffer_size,
                msreconnect=10000
                # time_correction   =TimeCorrectionType_smart # this is the default, no need to specify
            )
            self.chains.append(chain)

            if (valkka_xwin):
                # let OpenGLThread create the X window
                win_id = self.openglthread.createWindow(show=False)
                frame = self.Frame(self.w, win_id)
            else:
                # let Qt create the window
                frame = self.NativeFrame(self.w)
                win_id = frame.getWindowId()

            nrow = self.pardic["videos per row"]
            print(pre, "setupUi: layout index, address : ", cc // nrow,
                  cc % nrow, address)
            self.lay.addWidget(frame.widget, cc // nrow, cc % nrow)

            self.frames.append(frame)

            token = self.openglthread.connect(slot=cs, window_id=win_id)
            tokens.append(token)

            # take corresponding analyzer multiprocess
            process = self.processes[cc]
            process.createClient(
            )  # creates the shared memory client at the multiprocess
            # connect signals to the nested widget
            process.signals.start_move.connect(frame.set_moving)
            process.signals.stop_move.connect(frame.set_still)

            chain.decodingOn()  # tell the decoding thread to start its job
            cs += 1  # TODO: crash when repeating the same slot number ..?
            a += 1
            cc += 1
Example #8
0
    def openValkka(self):
        """Start the machine-vision process(es) and valkka threads, build the
        shmem filterchain and the video + analyzer widgets, then activate the
        machine-vision process on the shared-memory ring buffer.
        """
        self.mvision_process.go()

        # an optional master process serving several client processes
        if self.mvision_master_process is not None:
            assert (issubclass(self.mvision_master_process.__class__,
                               QShmemProcess))
            self.mvision_master_process.go()

        self.livethread = LiveThread(  # starts live stream services (using live555)
            name="live_thread",
            verbose=False)

        self.filethread = FileThread(name="file_thread", verbose=False)

        self.openglthread = OpenGLThread(  # starts frame presenting services
            name="mythread",
            n_720p=10,
            n_1080p=10,
            n_1440p=10,
            n_4K=10,
            verbose=False,
            msbuftime=100,
            affinity=-1)

        # this filterchain creates a shared memory server
        self.chain = ShmemFilterchain1(  # decoding and branching the stream happens here
            openglthread=self.openglthread,
            slot=1,
            shmem_name=self.shmem_name,
            shmem_image_dimensions=self.shmem_image_dimensions,
            shmem_image_interval=self.shmem_image_interval,
            shmem_ringbuffer_size=self.shmem_ringbuffer_size)

        shmem_name, n_buffer, shmem_image_dimensions = self.chain.getShmemPars(
        )

        self.video = QtWidgets.QWidget(self.video_area)

        if hasattr(self.mvision_process, "analyzer_video_widget_class"):
            # the machine vision class may declare what video widget it wants to use to define the machine vision parameters (line crossing, zone intrusion, etc.)
            self.analyzer_widget = AnalyzerWidget(
                parent=self.video_area,
                analyzer_video_widget_class=self.mvision_process.
                analyzer_video_widget_class)
        else:
            self.analyzer_widget = AnalyzerWidget(parent=self.video_area)

        self.mvision_process.connectAnalyzerWidget(self.analyzer_widget)
        self.analyzer_widget.activate()

        self.win_id = int(self.video.winId())

        self.video_lay.addWidget(self.video, 0, 0)
        self.video_lay.addWidget(self.analyzer_widget, 0, 1)
        self.token = self.openglthread.connect(slot=1, window_id=self.win_id)

        self.chain.decodingOn()  # tell the decoding thread to start its job

        # attach the machine-vision process to the shared-memory server created
        # by the filterchain above
        self.mvision_process.activate(
            n_buffer=self.shmem_ringbuffer_size,
            image_dimensions=self.shmem_image_dimensions,
            shmem_name=self.shmem_name)

        if self.mvision_master_process:
            self.mvision_process.setMasterProcess(self.mvision_master_process)
Example #9
0
"""
TODO: FileCacheThread should send initialization frame
- FileCacheThread .. per slot, two substream SetupFrame(s) .. or what?
- Video jerks a bit .. is this because the play edge is too close to the block edge and it runs empty before new frames arrive?
"""



setValkkaLogLevel(loglevel_debug)

def cb(mstime):
    """Millisecond-timestamp callback: just log the received value."""
    print("mstime callback", mstime)

# create OpenGLThread (for drawing video) and AVThread (for decoding)
glthread = OpenGLThread(name="gl_thread")
ctx = core.FrameFifoContext()
# decoded frames from AVThread are pushed into OpenGLThread's input
avthread = core.AVThread(
    "avthread",
    glthread.getInput(),
    ctx)
# NOTE(review): av_in_filter is unused in this visible chunk — presumably the
# streaming source is connected to it further down; verify
av_in_filter = avthread.getFrameFilter()

avthread.startCall()
avthread.decodingOnCall()

# create an X-window
window_id = glthread.createWindow()

# map frames with slot 1 to that window
glthread.newRenderGroup(window_id)
    def openValkka(self):
        """Start ValkkaFS recording/playback machinery plus the live and OpenGL
        threads, then build a live filterchain and a recorded-stream
        filterchain per address, each with its own video window.

        NOTE(review): use_live and valkka_xwin are read as globals here —
        presumably module-level flags; verify.
        """
        self.valkkafsmanager = ValkkaFSManager(
            self.valkkafs,
            # read = False,   # debugging
            # cache = False,  # debugging
            # write = False   # debugging
        )

        # wires the calendar/timeline GUI to the ValkkaFS manager
        self.playback_controller = PlaybackController(
            calendar_widget=self.calendarwidget,
            timeline_widget=self.timelinewidget,
            valkkafs_manager=self.valkkafsmanager,
            play_button=self.play_button,
            stop_button=self.stop_button,
            zoom_to_fs_button=self.zoom_to_fs_button)

        self.livethread = LiveThread(  # starts live stream services (using live555)
            name="live_thread",
            # verbose=True,
            verbose=False,
            affinity=self.pardic["live affinity"])

        self.openglthread = OpenGLThread(  # starts frame presenting services
            name="mythread",
            # reserve stacks of YUV video frames for various resolutions
            n_720p=self.pardic["n_720p"],
            n_1080p=self.pardic["n_1080p"],
            n_1440p=self.pardic["n_1440p"],
            n_4K=self.pardic["n_4K"],
            # naudio  =self.pardic["naudio"], # obsolete
            verbose=True,
            # verbose=False,
            msbuftime=self.pardic["msbuftime"],
            affinity=self.pardic["gl affinity"])

        if (self.openglthread.hadVsync()):
            w = QtWidgets.QMessageBox.warning(
                self, "VBLANK WARNING",
                "Syncing to vertical refresh enabled\n THIS WILL DESTROY YOUR FRAMERATE\n Disable it with 'export vblank_mode=0' for nvidia proprietary drivers, use 'export __GL_SYNC_TO_VBLANK=0'"
            )

        tokens = []
        self.chains = []

        a = self.pardic["dec affinity start"]
        cw = 0  # widget / window index
        cs = 1  # slot / stream count

        for address in self.addresses:
            # now livethread and openglthread are running
            if (a > self.pardic["dec affinity stop"]):
                a = self.pardic["dec affinity start"]
            print(pre, "openValkka: setting decoder thread on processor", a)

            if use_live:
                chain_live = ValkkaFSLiveFilterchain(  # decoding and branching the stream happens here
                    valkkafsmanager=self.valkkafsmanager,
                    id_rec=cs,  # identifies the stream in ValkkaFS
                    livethread=self.livethread,
                    address=address,
                    slot=cs,
                    affinity=a,
                    # verbose     =True
                    verbose=False,
                    msreconnect=10000,
                    # Reordering buffer time for Live555 packets in MILLIseconds # 0 means default
                    reordering_mstime=0
                    # reordering_mstime =300
                )

            rec_slot = cs + 100  # live and rec slot numbers must be kept separated ..

            chain_rec = ValkkaFSFileFilterchain(  # decoding and branching the stream happens here
                valkkafsmanager=self.valkkafsmanager,
                id_rec=cs,  # identifies the stream in ValkkaFS
                slot=rec_slot,
                affinity=a,
                # verbose     =True
                verbose=False)

            # send yuv to OpenGLThread
            if use_live:
                chain_live.connect_to_yuv("yuv_to_opengl_" + str(cs),
                                          self.openglthread.getInput())
            chain_rec.connect_to_yuv("yuv_to_opengl_" + str(cs),
                                     self.openglthread.getInput())

            # important .. otherwise chain will go out of context and get
            # garbage collected ..
            if use_live: self.chains.append(chain_live)
            self.chains.append(chain_rec)

            if ("no_qt" in self.pardic):
                # create our own x-windowses
                win_id = self.openglthread.createWindow(show=True)
                win_id_rec = self.openglthread.createWindow(show=True)

            else:

                # *** Choose one of the following sections ***

                # (1) Let Valkka create the windows/widget # use this: we get a window with correct parametrization
                # win_id =self.openglthread.createWindow(show=False)
                # fr     =getForeignWidget(self.w, win_id)

                if (valkka_xwin == False):
                    # (2) Let Qt create the widget
                    fr = TestWidget0(self.w)
                    win_id = int(fr.winId())

                    fr_rec = TestWidget0(self.rec_video_area)
                    win_id_rec = int(fr_rec.winId())

                else:
                    # """
                    # (3) Again, let Valkka create the window, but put on top a translucent widget (that catches mouse gestures)
                    win_id = self.openglthread.createWindow(show=False)
                    widget_pair = WidgetPair(self.w, win_id, TestWidget0)
                    fr = widget_pair.getWidget()
                    self.widget_pairs.append(widget_pair)

                    win_id_rec = self.openglthread.createWindow(show=False)
                    widget_pair = WidgetPair(self.rec_video_area, win_id_rec,
                                             TestWidget0)
                    fr_rec = widget_pair.getWidget()
                    self.widget_pairs.append(widget_pair)
                    # """

                nrow = self.pardic["videos per row"]
                print(pre, "setupUi: layout index, address : ", cw // nrow,
                      cw % nrow, address)

                self.lay.addWidget(fr, cw // nrow, cw % nrow)
                self.rec_video_area_lay.addWidget(fr_rec, cw // nrow,
                                                  cw % nrow)

                self.videoframes.append(fr)
                self.videoframes.append(fr_rec)

            # present frames with slot number cs at window win_id

            # rec_slot = cs # debug

            print(pre, "setupUi: live:", cs, win_id)
            print(pre, "setupUi: rec :", rec_slot, win_id_rec)

            token = self.openglthread.connect(slot=cs, window_id=win_id)
            tokens.append(token)
            token = self.openglthread.connect(slot=rec_slot,
                                              window_id=win_id_rec)
            tokens.append(token)

            cw += 1
            cs += 1

            if use_live:
                chain_live.decodingOn(
                )  # tell the decoding thread to start its job
            chain_rec.decodingOn()
            a += 1
Example #11
0
    def openValkka(self):
        """Start detector multiprocesses, Valkka threads and per-camera filterchains.

        Order is significant (see inline comments): the detector
        multiprocesses are forked *before* any threads are spawned, then
        LiveThread / OpenGLThread are started, and finally one
        VisionAlarmFilterChain per camera address is created, wired into the
        Qt grid layout and connected to its detector process.
        """

        # RGB shared-memory parameters (one ringbuffer per camera)
        shmem_image_dimensions = (1920 // 4, 1080 // 4)  # quarter resolution
        shmem_image_interval = 1000  # ms between shared-memory frames
        # NOTE(review): "rignbuffer" is a typo for "ringbuffer"; local name
        # only — the keyword arguments below are spelled correctly.
        shmem_rignbuffer_size = 10

        # Fragmented-MP4 shared-memory parameters
        shmem_buffers = 10
        shmem_name = "FragMP4Shmem"
        cellsize = 1024 * 1024 * 3  # bytes per shmem cell
        timeout = 1000  # ms

        cs = 1  # slot counter (also used to derive per-camera shmem names)
        cc = 1
        self.processes = []
        # Fork one fire-detector multiprocess per camera address.
        for address in self.addresses:
            # NOTE(review): this overwrites the "FragMP4Shmem" default above;
            # after this loop, shmem_name holds the *last* "cameraN" value,
            # which is then reused for every chain's frag_shmem_name below —
            # verify that sharing one frag shmem name is intended.
            shmem_name = "camera" + str(cs)
            # print("shmem name is {} for process number {} ".format(shmem_name, cc))
            process = QValkkaFireDetectorProcess(
                "process" + str(cs),
                shmem_name=shmem_name,
                n_buffer=shmem_rignbuffer_size,
                image_dimensions=shmem_image_dimensions)
            self.processes.append(process)
            cs += 1
        print(self.processes)

        # Hand the multiprocesses to a Qt thread that listens to their messages.

        self.thread = QValkkaThread(processes=self.processes)

        # Start the multiprocesses (must happen before spawning any threads).
        self.startProcesses()

        # Multiprocesses are forked; now it is safe to spawn threads.

        self.livethread = LiveThread(name="live",
                                     verbose=False,
                                     affinity=self.pardic["live_affinity"])
        self.openglthread = OpenGLThread(
            name="mythread",
            # reserve stacks of YUV video frames for various resolutions
            n_720p=50,
            n_1080p=50,
            n_1440p=50,
            n_4K=50,
            verbose=False,
            msbuftime=100,  # frame buffering time in milliseconds
            affinity=-1)
        # if (self.openglthread.hadVsync()):
        #     q = QtWidgets.QMessageBox.warning(self,
        #                                       "VBLANK WARNING",
        #                                       "Syncing to vertical refresh enabled \n THIS WILL DESTROY YOUR FRAMERATE\n disable it using 'export vblank_mode=0'")

        tokens = []
        self.chains = []  # keep chains referenced so they are not garbage collected
        self.frames = []

        cs = 1  # restart slot numbering for the filterchains
        cc = 0  # index into self.processes

        # (x, y) grid position in the Qt layout
        x = 0
        y = 0
        cam_count = 0  # NOTE(review): never used below
        a = self.pardic["dec affinity start"]  # decoder CPU affinity, cycled per camera
        for address in self.addresses:

            # Livethread/openglthread are running at this point.
            print('address :', address)
            # Wrap the decoder affinity back to the start of its range.
            if (a > self.pardic["dec affinity stop"]):
                a = self.pardic["dec affinity start"]

            chain = VisionAlarmFilterChain(
                # decoding and branching happens here
                livethread=self.livethread,
                openglthread=self.openglthread,
                address=address,
                slot=cs,
                affinity=a,
                shmem_name="camera" + str(cs),  # matches the detector process above
                shmem_image_dimensions=shmem_image_dimensions,
                shmem_image_interval=shmem_image_interval,
                shmem_ringbuffer_size=shmem_rignbuffer_size,
                msreconnect=1000,
                frag_shmem_buffers=shmem_buffers,
                frag_shmem_name=shmem_name,
                frag_shmem_cellsize=cellsize,
                frag_shmem_timeout=timeout,
            )
            self.chains.append(chain)

            # Create an X window on the OpenGL thread and wrap it in a Qt frame.
            win_id = self.openglthread.createWindow(show=False)
            frame = self.QtFrame(self.w, win_id)

            # Two-row grid layout: move to the second row after two columns.
            if y > 1:
                x = 1
                y = 0
            self.wlay.addWidget(frame.widget, x, y)
            y += 1
            # Present frames with slot number cs in window win_id.
            token = self.openglthread.connect(slot=cs, window_id=win_id)
            tokens.append(token)

            # Take the corresponding detector multiprocess.
            process = self.processes[cc]
            process.createClient(
            )  # creates the shared memory client at the multiprocess
            # Connect the detector's alarm signal to the GUI alert handler.

            process.signals.Fire_detected.connect(self.addAlert)

            chain.decodingOn()  # start the decoding thread
            cs += 1
            a += 1
            cc += 1
            # FragMP4 shmem client.
            # NOTE(review): client is rebound on every iteration and never
            # stored — each previous client is dropped; confirm intended.
            client = FragMP4ShmemClient(name=shmem_name,
                                        n_ringbuffer=shmem_buffers,
                                        n_size=cellsize,
                                        mstimeout=timeout,
                                        verbose=False)
    def openValkka(self):
        """Start Valkka threads and one BasicFilterchain per camera address,
        replicating each stream into ``self.pardic["replicate"]`` windows.

        Live/OpenGL threads are started first, then for each address a
        decoding filterchain is created and its frames are presented in one
        or more Qt (or raw X) windows laid out on the desktop.
        """
        self.livethread = LiveThread(  # starts live stream services (using live555)
            name="live_thread",
            # verbose=True,
            verbose=False,
            affinity=self.pardic["live affinity"])

        self.openglthread = OpenGLThread(  # starts frame presenting services
            name="mythread",
            n_720p=self.pardic[
                "n_720p"],  # reserve stacks of YUV video frames for various resolutions
            n_1080p=self.pardic["n_1080p"],
            n_1440p=self.pardic["n_1440p"],
            n_4K=self.pardic["n_4K"],
            # naudio  =self.pardic["naudio"], # obsolete
            # verbose =True,
            verbose=False,
            msbuftime=self.pardic["msbuftime"],
            affinity=self.pardic["gl affinity"],
            x_connection=":0.0"
            # x_connection =":0.1" # works .. video appears on the other xscreen
        )
        """ # this results in a segfault
    print("> starting second OpenGLThread")
    # testing: start another OpenGLThread
    self.openglthread2=OpenGLThread(     # starts frame presenting services
      name    ="mythread2",
      n_720p   =self.pardic["n_720p"],   # reserve stacks of YUV video frames for various resolutions
      n_1080p  =self.pardic["n_1080p"],
      n_1440p  =self.pardic["n_1440p"],
      n_4K     =self.pardic["n_4K"],
      # naudio  =self.pardic["naudio"], # obsolete
      # verbose =True,
      verbose =False,
      msbuftime=self.pardic["msbuftime"],
      affinity=self.pardic["gl affinity"],
      x_connection =":0.1" # works .. video appears on the other xscreen
      )
    print("> second OpenGLThread started")
    """

        # Warn the user if the driver syncs to vertical refresh (kills framerate).
        if (self.openglthread.hadVsync()):
            w = QtWidgets.QMessageBox.warning(
                self, "VBLANK WARNING",
                "Syncing to vertical refresh enabled\n THIS WILL DESTROY YOUR FRAMERATE\n Disable it with 'export vblank_mode=0' for nvidia proprietary drivers, use 'export __GL_SYNC_TO_VBLANK=0'"
            )

        tokens = []
        self.chains = []  # keep chains referenced so they are not garbage collected

        a = self.pardic["dec affinity start"]  # decoder CPU affinity, cycled per camera
        cw = 0  # widget / window index
        cs = 1  # slot / stream count

        # Grid geometry: total windows = addresses x replicate factor.
        ntotal = len(self.addresses) * self.pardic["replicate"]
        nrow = self.pardic["videos per row"]
        ncol = max((ntotal // self.pardic["videos per row"]) + 1, 2)

        for address in self.addresses:
            # now livethread and openglthread are running
            # Wrap the decoder affinity back to the start of its range.
            if (a > self.pardic["dec affinity stop"]):
                a = self.pardic["dec affinity start"]
            print(pre, "openValkka: setting decoder thread on processor", a)

            chain = BasicFilterchain(  # decoding and branching the stream happens here
                livethread=self.livethread,
                openglthread=self.openglthread,
                address=address,
                slot=cs,
                affinity=a,
                # verbose     =True
                verbose=False,
                msreconnect=10000,  # reconnect if no frames for 10 s

                # flush_when_full =True
                flush_when_full=False,

                # time_correction   =TimeCorrectionType_dummy,  # Timestamp correction type: TimeCorrectionType_none, TimeCorrectionType_dummy, or TimeCorrectionType_smart (default)
                time_correction=TimeCorrectionType_smart,
                recv_buffer_size=
                0,  # Operating system socket ringbuffer size in bytes # 0 means default
                # recv_buffer_size  =1024*800,   # 800 KB
                reordering_mstime=
                0  # Reordering buffer time for Live555 packets in MILLIseconds # 0 means default
                # reordering_mstime =300
            )

            self.chains.append(
                chain
            )  # important .. otherwise chain will go out of context and get garbage collected ..

            # One window per replica of this stream.
            for cc in range(0, self.pardic["replicate"]):
                if ("no_qt" in self.pardic):
                    # create our own x-windowses (no Qt widgets at all)
                    win_id = self.openglthread.createWindow(show=True)
                else:

                    # *** Choose one of the following sections ***

                    # (1) Let Valkka create the windows/widget # use this: we get a window with correct parametrization
                    # win_id =self.openglthread.createWindow(show=False)
                    # fr     =getForeignWidget(self.w, win_id)

                    if (valkka_xwin == False):
                        # (2) Let Qt create the widget
                        fr = TestWidget0(None)
                        win_id = int(fr.winId())
                    else:
                        # """
                        # (3) Again, let Valkka create the window, but put on top a translucent widget (that catches mouse gestures)
                        win_id = self.openglthread.createWindow(show=False)
                        widget_pair = WidgetPair(None, win_id, TestWidget0)
                        fr = widget_pair.getWidget()
                        self.widget_pairs.append(widget_pair)
                        # """

                    print(pre, "setupUi: layout index, address : ", cw // nrow,
                          cw % nrow, address)
                    # self.lay.addWidget(fr,cw//nrow,cw%nrow) # floating windows instead

                    # Place the video container as a free-floating window on the desktop grid.
                    container = VideoContainer(None, fr, n=0)
                    container.getWidget().setGeometry(
                        self.desktop_handler.getGeometry(
                            nrow, ncol, cw % nrow, cw // nrow))
                    container.getWidget().show()

                    self.videoframes.append(container)

                token = self.openglthread.connect(
                    slot=cs, window_id=win_id
                )  # present frames with slot number cs at window win_id
                tokens.append(token)
                cw += 1

            cs += 1  # TODO: crash when repeating the same slot number ..?

            chain.decodingOn()  # tell the decoding thread to start its job
            a += 1
Example #13
0
from valkka.api2 import BasicFilterchain
"""<rtf>
Instantiating the API level 2 LiveThread starts running the underlying cpp thread:
<rtf>"""
# Instantiating the API level 2 LiveThread starts the underlying cpp thread.
livethread = LiveThread(  # starts live stream services (using live555)
    name="live_thread",
    verbose=False,
    affinity=-1)  # NOTE(review): -1 presumably means "no core pinning" — confirm against Valkka docs
"""<rtf>
Same goes for OpenGLThread:
<rtf>"""
# Instantiating OpenGLThread likewise starts its cpp thread.
openglthread = OpenGLThread(
    name="glthread",
    n_720p=20,  # reserve stacks of YUV video frames for various resolutions
    n_1080p=20,
    n_1440p=0,  # no 1440p/4K frames reserved in this example
    n_4K=0,
    verbose=False,
    msbuftime=100,  # frame buffering time in milliseconds
    affinity=-1)
"""<rtf>
The filterchain and decoder (AVThread) are encapsulated into a single class.  Instantiating starts the AVThread (decoding is off by default):
<rtf>"""
chain = BasicFilterchain(  # decoding and branching the stream happens here
    livethread=livethread,
    openglthread=openglthread,
    address="rtsp://*****:*****@192.168.1.41",
    slot=1,
    affinity=-1,
    verbose=False,
    msreconnect=10000  # if no frames in ten seconds, try to reconnect